AWSRekognitionGetLabelDetectionResponse
Objective-C
@interface AWSRekognitionGetLabelDetectionResponse
Swift
class AWSRekognitionGetLabelDetectionResponse
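A response of this type is returned by the asynchronous GetLabelDetection operation, which you call with the job identifier from a previous StartLabelDetection request. The following is a minimal sketch, assuming the SDK's AWSTask-based getLabelDetection(_:) method on AWSRekognition and a default service configuration that has already been set up; the jobId value and the sortBy setting are illustrative assumptions.
Swift
import AWSRekognition

func fetchLabelDetection(jobId: String) {
    // Assumes AWSServiceManager's default configuration (credentials, region) is already set.
    guard let request = AWSRekognitionGetLabelDetectionRequest() else { return }
    request.jobId = jobId
    request.sortBy = .timestamp   // assumed SortBy value; orders labels by when they appear

    AWSRekognition.default().getLabelDetection(request).continueWith { task -> Any? in
        if let error = task.error {
            print("GetLabelDetection failed: \(error)")
        } else if let response = task.result {
            print("Job status: \(response.jobStatus), labels returned: \(response.labels?.count ?? 0)")
        }
        return nil
    }
}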
-
The current status of the label detection job.
Declaration
Objective-C
@property (nonatomic) AWSRekognitionVideoJobStatus jobStatus;
Swift
var jobStatus: AWSRekognitionVideoJobStatus { get set }
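Because label detection runs asynchronously, it is worth branching on this status before reading the rest of the response. A short sketch, assuming the usual AWSRekognitionVideoJobStatus cases (.inProgress, .succeeded, .failed):
Swift
import AWSRekognition

func handle(_ response: AWSRekognitionGetLabelDetectionResponse) {
    switch response.jobStatus {
    case .inProgress:
        // Not finished yet; poll again later or wait for the Amazon SNS completion notification.
        break
    case .succeeded:
        // Detected labels are available in response.labels.
        break
    case .failed:
        // statusMessage describes why the job failed.
        print(response.statusMessage ?? "Label detection job failed.")
    default:
        // .unknown or any status value added in a later SDK version.
        break
    }
}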
-
Version number of the label detection model that was used to detect labels.
Declaration
Objective-C
@property (nonatomic, strong) NSString *_Nullable labelModelVersion;
Swift
var labelModelVersion: String? { get set }
-
An array of labels detected in the video. Each element contains the detected label and the time, in milliseconds from the start of the video, that the label was detected.
Declaration
Objective-C
@property (nonatomic, strong) NSArray<AWSRekognitionLabelDetection *> *_Nullable labels;
Swift
var labels: [AWSRekognitionLabelDetection]? { get set }
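For example, the following sketch walks the array and prints each detection with its offset; it assumes AWSRekognitionLabelDetection exposes timestamp and label, and AWSRekognitionLabel exposes name and confidence, as elsewhere in the SDK.
Swift
import AWSRekognition

func printDetections(in response: AWSRekognitionGetLabelDetectionResponse) {
    for detection in response.labels ?? [] {
        // timestamp is milliseconds from the start of the video.
        let millis = detection.timestamp?.int64Value ?? 0
        let name = detection.label?.name ?? "unknown"
        let confidence = detection.label?.confidence?.doubleValue ?? 0
        print("\(millis) ms: \(name) (\(confidence)%)")
    }
}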
-
If the response is truncated, Amazon Rekognition Video returns this token, which you can use in a subsequent request to retrieve the next set of labels.
Declaration
Objective-C
@property (nonatomic, strong) NSString *_Nullable nextToken;
Swift
var nextToken: String? { get set }
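A long video can produce more labels than fit in one response, so callers typically loop (or recurse) until nextToken comes back nil. A sketch of that pattern, reusing the assumed getLabelDetection(_:) call shown above:
Swift
import AWSRekognition

func fetchAllLabels(jobId: String,
                    nextToken: String? = nil,
                    collected: [AWSRekognitionLabelDetection] = [],
                    completion: @escaping ([AWSRekognitionLabelDetection]) -> Void) {
    guard let request = AWSRekognitionGetLabelDetectionRequest() else {
        completion(collected)
        return
    }
    request.jobId = jobId
    request.nextToken = nextToken   // nil on the first call

    AWSRekognition.default().getLabelDetection(request).continueWith { task -> Any? in
        guard let response = task.result else {
            completion(collected)
            return nil
        }
        let all = collected + (response.labels ?? [])
        if let token = response.nextToken {
            // Truncated response: request the next page with the returned token.
            fetchAllLabels(jobId: jobId, nextToken: token,
                           collected: all, completion: completion)
        } else {
            completion(all)
        }
        return nil
    }
}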
-
If the job fails, StatusMessage provides a descriptive error message.
Declaration
Objective-C
@property (nonatomic, strong) NSString *_Nullable statusMessage;
Swift
var statusMessage: String? { get set }
-
Information about a video that Amazon Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition Video operation.
Declaration
Objective-C
@property (nonatomic, strong) AWSRekognitionVideoMetadata *_Nullable videoMetadata;
Swift
var videoMetadata: AWSRekognitionVideoMetadata? { get set }
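A brief sketch of reading the metadata, assuming the durationMillis, frameRate, frameWidth, and frameHeight properties of AWSRekognitionVideoMetadata:
Swift
import AWSRekognition

func describeVideo(in response: AWSRekognitionGetLabelDetectionResponse) {
    guard let metadata = response.videoMetadata else { return }
    let seconds = (metadata.durationMillis?.doubleValue ?? 0) / 1000.0
    let width = metadata.frameWidth?.intValue ?? 0
    let height = metadata.frameHeight?.intValue ?? 0
    let fps = metadata.frameRate?.floatValue ?? 0
    print("Analyzed \(seconds) s of \(width)x\(height) video at \(fps) fps")
}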