AWSTranscribeStreamingStartStreamTranscriptionRequest
Objective-C
@interface AWSTranscribeStreamingStartStreamTranscriptionRequest
Swift
class AWSTranscribeStreamingStartStreamTranscriptionRequest
-
PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP/2 data frame.
Declaration
Objective-C
@property (nonatomic, strong) AWSTranscribeStreamingAudioStream *_Nullable audioStream;
Swift
var audioStream: AWSTranscribeStreamingAudioStream? { get set }
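The audio payload must be raw PCM bytes. As a sketch (assuming 16-bit, little-endian, single-channel samples, a common capture format that is not specified on this page), normalized floating-point samples could be converted to a PCM blob like this before being wrapped in the audio stream:
Swift
import Foundation

// Sketch only: converts normalized Float samples in [-1.0, 1.0] into
// 16-bit little-endian PCM bytes. The sample format is an assumption;
// match it to however your audio is actually captured.
func pcmData(from samples: [Float]) -> Data {
    var data = Data(capacity: samples.count * MemoryLayout<Int16>.size)
    for sample in samples {
        let clamped = max(-1.0, min(1.0, sample))
        var value = Int16(clamped * Float(Int16.max)).littleEndian
        withUnsafeBytes(of: &value) { data.append(contentsOf: $0) }
    }
    return data
}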
-
Indicates the language used in the input audio stream.
Declaration
Objective-C
@property (nonatomic) AWSTranscribeStreamingLanguageCode languageCode;
Swift
var languageCode: AWSTranscribeStreamingLanguageCode { get set }
-
The encoding used for the input audio.
Declaration
Objective-C
@property (nonatomic) AWSTranscribeStreamingMediaEncoding mediaEncoding;
Swift
var mediaEncoding: AWSTranscribeStreamingMediaEncoding { get set }
-
The sample rate, in hertz, of the input audio. We suggest that you use 8,000 Hz for low-quality audio and 16,000 Hz for high-quality audio.
Declaration
Objective-C
@property (nonatomic, strong) NSNumber *_Nullable mediaSampleRateHertz;
Swift
var mediaSampleRateHertz: NSNumber? { get set }
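Taken together, languageCode, mediaEncoding, and mediaSampleRateHertz describe the format of the audio you send. A minimal configuration sketch, assuming the .enUS and .pcm case names are the Swift imports of AWSTranscribeStreamingLanguageCodeEnUS and AWSTranscribeStreamingMediaEncodingPcm:
Swift
import Foundation
import AWSTranscribeStreaming

// Sketch only: applies the media-format properties documented above to an
// existing request. The .enUS and .pcm cases are assumed Swift imports of
// the corresponding Objective-C enum values; verify against your SDK version.
func applyMediaSettings(to request: AWSTranscribeStreamingStartStreamTranscriptionRequest,
                        highQualityAudio: Bool) {
    request.languageCode = .enUS
    request.mediaEncoding = .pcm
    // 16,000 Hz for high-quality audio, 8,000 Hz for low-quality audio.
    request.mediaSampleRateHertz = NSNumber(value: highQualityAudio ? 16000 : 8000)
}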
-
An identifier for the transcription session. Use this parameter when you want to retry a session. If you don’t provide a session ID, Amazon Transcribe generates one for you and returns it in the response.
Declaration
Objective-C
@property (nonatomic, strong) NSString *_Nullable sessionId;
Swift
var sessionId: String? { get set }
-
The name of the vocabulary to use when processing the transcription job.
Declaration
Objective-C
@property (nonatomic, strong) NSString *_Nullable vocabularyName;
Swift
var vocabularyName: String? { get set }
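Putting the pieces together, a request might be configured as follows. This is a sketch rather than official sample code: the failable-initializer pattern, the reuse of a previously returned session ID for retries, and the "medical-terms" vocabulary name are illustrative assumptions.
Swift
import Foundation
import AWSTranscribeStreaming

// Sketch only: builds a start-stream-transcription request using the
// properties documented on this page.
func makeStartRequest(retrySessionId: String? = nil)
        -> AWSTranscribeStreamingStartStreamTranscriptionRequest? {
    // The initializer is assumed to be failable when imported into Swift.
    guard let request = AWSTranscribeStreamingStartStreamTranscriptionRequest() else {
        return nil
    }
    request.languageCode = .enUS
    request.mediaEncoding = .pcm
    request.mediaSampleRateHertz = 16000       // high-quality audio
    request.sessionId = retrySessionId         // nil lets the service generate one
    request.vocabularyName = "medical-terms"   // hypothetical custom vocabulary
    return request
}

Pass nil for retrySessionId on a first attempt, and pass the session ID returned in the earlier response when retrying a session.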