diff --git a/AWSComprehend/AWSComprehendModel.h b/AWSComprehend/AWSComprehendModel.h
index 67211e09bfe..ca7d63ac19b 100644
--- a/AWSComprehend/AWSComprehendModel.h
+++ b/AWSComprehend/AWSComprehendModel.h
@@ -369,6 +369,17 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) {
     AWSComprehendTargetedSentimentEntityTypeOther,
 };
 
+typedef NS_ENUM(NSInteger, AWSComprehendToxicContentType) {
+    AWSComprehendToxicContentTypeUnknown,
+    AWSComprehendToxicContentTypeGraphic,
+    AWSComprehendToxicContentTypeHarassmentOrAbuse,
+    AWSComprehendToxicContentTypeHateSpeech,
+    AWSComprehendToxicContentTypeInsult,
+    AWSComprehendToxicContentTypeProfanity,
+    AWSComprehendToxicContentTypeSexual,
+    AWSComprehendToxicContentTypeViolenceOrThreat,
+};
+
 @class AWSComprehendAugmentedManifestsListItem;
 @class AWSComprehendBatchDetectDominantLanguageItemResult;
 @class AWSComprehendBatchDetectDominantLanguageRequest;
@@ -475,6 +486,8 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) {
 @class AWSComprehendDetectSyntaxResponse;
 @class AWSComprehendDetectTargetedSentimentRequest;
 @class AWSComprehendDetectTargetedSentimentResponse;
+@class AWSComprehendDetectToxicContentRequest;
+@class AWSComprehendDetectToxicContentResponse;
 @class AWSComprehendDocumentClass;
 @class AWSComprehendDocumentClassificationConfig;
 @class AWSComprehendDocumentClassificationJobFilter;
@@ -628,8 +641,11 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) {
 @class AWSComprehendTargetedSentimentEntity;
 @class AWSComprehendTargetedSentimentMention;
 @class AWSComprehendTaskConfig;
+@class AWSComprehendTextSegment;
 @class AWSComprehendTopicsDetectionJobFilter;
 @class AWSComprehendTopicsDetectionJobProperties;
+@class AWSComprehendToxicContent;
+@class AWSComprehendToxicLabels;
 @class AWSComprehendUntagResourceRequest;
 @class AWSComprehendUntagResourceResponse;
 @class AWSComprehendUpdateDataSecurityConfig;
@@ -1226,7 +1242,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) {
 /**
-
Use the Bytes
parameter to input a text, PDF, Word or image file. You can also use the Bytes
parameter to input an Amazon Textract DetectDocumentText
or AnalyzeDocument
output file.
Provide the input document as a sequence of base64-encoded bytes. If your code uses an Amazon Web Services SDK to classify documents, the SDK may encode the document file bytes for you.
The maximum length of this field depends on the input document type. For details, see Inputs for real-time custom analysis in the Comprehend Developer Guide.
If you use the Bytes
parameter, do not use the Text
parameter.
Use the Bytes
parameter to input a text, PDF, Word or image file.
When you classify a document using a custom model, you can also use the Bytes
parameter to input an Amazon Textract DetectDocumentText
or AnalyzeDocument
output file.
To classify a document using the prompt safety classifier, use the Text
parameter for input.
Provide the input document as a sequence of base64-encoded bytes. If your code uses an Amazon Web Services SDK to classify documents, the SDK may encode the document file bytes for you.
The maximum length of this field depends on the input document type. For details, see Inputs for real-time custom analysis in the Comprehend Developer Guide.
If you use the Bytes
parameter, do not use the Text
parameter.
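As a rough Objective-C sketch of a Bytes-based request against a custom endpoint (the file path and endpoint ARN are placeholders, and the lowerCamelCase property names follow this SDK's usual mapping of the request members):

NSData *documentBytes = [NSData dataWithContentsOfFile:@"/path/to/document.pdf"]; // placeholder path
AWSComprehendClassifyDocumentRequest *request = [AWSComprehendClassifyDocumentRequest new];
request.endpointArn = @"arn:aws:comprehend:us-east-1:111122223333:document-classifier-endpoint/example"; // placeholder custom endpoint
request.bytes = documentBytes; // the SDK may base64-encode the file bytes for you; leave text unset
[[AWSComprehend defaultComprehend] classifyDocument:request completionHandler:^(AWSComprehendClassifyDocumentResponse * _Nullable response, NSError * _Nullable error) {
    // Inspect response.classes or response.labels, depending on how the custom model was trained.
}];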
The Amazon Resource Number (ARN) of the endpoint. For information about endpoints, see Managing endpoints.
+The Amazon Resource Number (ARN) of the endpoint.
For prompt safety classification, Amazon Comprehend provides the endpoint ARN. For more information about prompt safety classifiers, see Prompt safety classification in the Amazon Comprehend Developer Guide.
For custom classification, you create an endpoint for your custom model. For more information, see Using Amazon Comprehend endpoints.
*/ @property (nonatomic, strong) NSString * _Nullable endpointArn; @@ -1254,7 +1270,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { /** -The classes used by the document being analyzed. These are used for multi-class trained models. Individual classes are mutually exclusive and each document is expected to have only a single class assigned to it. For example, an animal can be a dog or a cat, but not both at the same time.
+The classes used by the document being analyzed. These are used for models trained in multi-class mode. Individual classes are mutually exclusive and each document is expected to have only a single class assigned to it. For example, an animal can be a dog or a cat, but not both at the same time.
For prompt safety classification, the response includes only two classes (SAFE_PROMPT and UNSAFE_PROMPT), along with a confidence score for each class. The value range of the score is zero to one, where one is the highest confidence.
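For illustration, a caller could read those two classes from an AWSComprehendClassifyDocumentResponse roughly as follows (a sketch; it assumes AWSComprehendDocumentClass exposes name and score properties):

for (AWSComprehendDocumentClass *documentClass in response.classes) {
    // For the prompt safety classifier, name is SAFE_PROMPT or UNSAFE_PROMPT.
    if ([documentClass.name isEqualToString:@"UNSAFE_PROMPT"] && documentClass.score.doubleValue > 0.5) {
        NSLog(@"Prompt flagged as unsafe (confidence %@)", documentClass.score);
    }
}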
*/ @property (nonatomic, strong) NSArrayThe labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
+The labels used in the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
*/ @property (nonatomic, strong) NSArrayIndicates the mode in which the classifier will be trained. The classifier can be trained in multi-class mode, which identifies one and only one class for each document, or multi-label mode, which identifies one or more labels for each document. In multi-label mode, multiple labels for an individual document are separated by a delimiter. The default delimiter between labels is a pipe (|).
+Indicates the mode in which the classifier will be trained. The classifier can be trained in multi-class (single-label) mode or multi-label mode. Multi-class mode identifies a single class label for each document and multi-label mode identifies one or more class labels for each document. Multiple labels for an individual document are separated by a delimiter. The default delimiter between labels is a pipe (|).
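For example, the mode is chosen when the classifier is created; the request class and enum case names below are assumptions based on this SDK's naming conventions:

AWSComprehendCreateDocumentClassifierRequest *createRequest = [AWSComprehendCreateDocumentClassifierRequest new];
createRequest.mode = AWSComprehendDocumentClassifierModeMultiLabel; // or AWSComprehendDocumentClassifierModeMultiClass
// In multi-label training data, multiple labels on a single line are separated by the delimiter (a pipe by default).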
*/ @property (nonatomic, assign) AWSComprehendDocumentClassifierMode mode; @@ -1419,7 +1435,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @property (nonatomic, strong) NSString * _Nullable modelPolicy; /** -Specifies the location for the output files from a custom classifier job. This parameter is required for a request that creates a native classifier model.
+Specifies the location for the output files from a custom classifier job. This parameter is required for a request that creates a native document model.
*/ @property (nonatomic, strong) AWSComprehendDocumentClassifierOutputDataConfig * _Nullable outputDataConfig; @@ -1602,7 +1618,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { /** -To associate an existing model with the flywheel, specify the Amazon Resource Number (ARN) of the model version.
+To associate an existing model with the flywheel, specify the Amazon Resource Number (ARN) of the model version. Do not set TaskConfig
or ModelType
if you specify an ActiveModelArn
.
The model type.
+The model type. You need to set ModelType
if you are creating a flywheel for a new model.
Configuration about the custom classifier associated with the flywheel.
+Configuration about the model associated with the flywheel. You need to set TaskConfig
if you are creating a flywheel for a new model.
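A hedged sketch of the two flywheel configurations described above (property names follow the ActiveModelArn/TaskConfig/ModelType members of the create request; the modelType enum case is an assumption):

AWSComprehendCreateFlywheelRequest *flywheelRequest = [AWSComprehendCreateFlywheelRequest new];
// Existing model: set only the model version ARN; leave taskConfig and modelType unset.
flywheelRequest.activeModelArn = @"arn:aws:comprehend:us-east-1:111122223333:document-classifier/example/version/1"; // placeholder
// New model: omit activeModelArn and set both of the following instead.
// flywheelRequest.modelType = AWSComprehendModelTypeDocumentClassifier; // assumed enum case
// flywheelRequest.taskConfig = taskConfig; // AWSComprehendTaskConfig with documentClassificationConfig or entityRecognitionConfig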
The language of the input text. Currently, English is the only supported language.
+ */ +@property (nonatomic, assign) AWSComprehendLanguageCode languageCode; + +/** +A list of up to 10 text strings. Each string has a maximum size of 1 KB, and the maximum size of the list is 10 KB.
+ */ +@property (nonatomic, strong) NSArrayResults of the content moderation analysis. Each entry in the results list contains a list of toxic content types identified in the text, along with a confidence score for each content type. The results list also includes a toxicity score for each entry in the results list.
+ */ +@property (nonatomic, strong) NSArraySpecifies the class that categorizes the document being analyzed
*/ @@ -2754,7 +2801,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end /** -Configuration required for a custom classification model.
+Configuration required for a document classification model.
Required parameters: [Mode] */ @interface AWSComprehendDocumentClassificationConfig : AWSModel @@ -2872,14 +2919,14 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @property (nonatomic, strong) NSString * _Nullable volumeKmsKeyId; /** -Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your document classification job. For more information, see Amazon VPC.
+Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your document classification job. For more information, see Amazon VPC.
*/ @property (nonatomic, strong) AWSComprehendVpcConfig * _Nullable vpcConfig; @end /** -The location of the training documents. This parameter is required in a request to create a native classifier model.
+The location of the training documents. This parameter is required in a request to create a semi-structured document classification model.
Required parameters: [S3Uri] */ @interface AWSComprehendDocumentClassifierDocuments : AWSModel @@ -2947,12 +2994,12 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @property (nonatomic, strong) AWSComprehendDocumentReaderConfig * _Nullable documentReaderConfig; /** -The type of input documents for training the model. Provide plain-text documents to create a plain-text model, and provide semi-structured documents to create a native model.
+The type of input documents for training the model. Provide plain-text documents to create a plain-text model, and provide semi-structured documents to create a native document model.
*/ @property (nonatomic, assign) AWSComprehendDocumentClassifierDocumentTypeFormat documentType; /** -The S3 location of the training documents. This parameter is required in a request to create a native classifier model.
+The S3 location of the training documents. This parameter is required in a request to create a native document model.
*/ @property (nonatomic, strong) AWSComprehendDocumentClassifierDocuments * _Nullable documents; @@ -2967,14 +3014,14 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @property (nonatomic, strong) NSString * _Nullable s3Uri; /** -This specifies the Amazon S3 location where the test annotations for an entity recognizer are located. The URI must be in the same Amazon Web Services Region as the API endpoint that you are calling.
+This specifies the Amazon S3 location that contains the test annotations for the document classifier. The URI must be in the same Amazon Web Services Region as the API endpoint that you are calling.
*/ @property (nonatomic, strong) NSString * _Nullable testS3Uri; @end /** -Provide the location for output data from a custom classifier job. This field is mandatory if you are training a native classifier model.
+Provide the location for output data from a custom classifier job. This field is mandatory if you are training a native document model.
*/ @interface AWSComprehendDocumentClassifierOutputDataConfig : AWSModel @@ -3093,7 +3140,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @property (nonatomic, strong) NSString * _Nullable volumeKmsKeyId; /** -Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.
+Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.
*/ @property (nonatomic, strong) AWSComprehendVpcConfig * _Nullable vpcConfig; @@ -3191,7 +3238,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @property (nonatomic, assign) AWSComprehendDocumentReadMode documentReadMode; /** -Specifies the type of Amazon Textract features to apply. If you chose TEXTRACT_ANALYZE_DOCUMENT
as the read action, you must specify one or both of the following values:
TABLES
- Returns information about any tables that are detected in the input document.
FORMS
- Returns information and the data from any forms that are detected in the input document.
Specifies the type of Amazon Textract features to apply. If you chose TEXTRACT_ANALYZE_DOCUMENT
as the read action, you must specify one or both of the following values:
TABLES
- Returns additional information about any tables that are detected in the input document.
FORMS
- Returns additional information about any forms that are detected in the input document.
An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.
Entity types must not contain the following invalid characters: \n (line break), \\n (escaped line break, \r (carriage return), \\r (escaped carriage return), \t (tab), \\t (escaped tab), space, and , (comma).
+An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.
Entity types must not contain the following invalid characters: \n (line break), \\n (escaped line break), \r (carriage return), \\r (escaped carriage return), \t (tab), \\t (escaped tab), and , (comma).
*/ @property (nonatomic, strong) NSString * _Nullable types; @@ -4316,7 +4363,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @property (nonatomic, assign) AWSComprehendFlywheelStatus status; /** -Configuration about the custom classifier associated with the flywheel.
+Configuration about the model associated with a flywheel.
*/ @property (nonatomic, strong) AWSComprehendTaskConfig * _Nullable taskConfig; @@ -4469,13 +4516,13 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end /** -Provides additional detail about why the request failed:
Document size is too large - Check the size of your file and resubmit the request.
Document type is not supported - Check the file type and resubmit the request.
Too many pages in the document - Check the number of pages in your file and resubmit the request.
Access denied to Amazon Textract - Verify that your account has permission to use Amazon Textract API operations and resubmit the request.
Provides additional detail about why the request failed.
*/ @interface AWSComprehendInvalidRequestDetail : AWSModel /** -Reason code is INVALID_DOCUMENT
.
Reason codes include the following values:
DOCUMENT_SIZE_EXCEEDED - Document size is too large. Check the size of your file and resubmit the request.
UNSUPPORTED_DOC_TYPE - Document type is not supported. Check the file type and resubmit the request.
PAGE_LIMIT_EXCEEDED - Too many pages in the document. Check the number of pages in your file and resubmit the request.
TEXTRACT_ACCESS_DENIED - Access denied to Amazon Textract. Verify that your account has permission to use Amazon Textract API operations and resubmit the request.
NOT_TEXTRACT_JSON - Document is not Amazon Textract JSON format. Verify the format and resubmit the request.
MISMATCHED_TOTAL_PAGE_COUNT - Check the number of pages in your file and resubmit the request.
INVALID_DOCUMENT - Invalid document. Check the file and resubmit the request.
Contains the sentiment and sentiment score for one mention of an entity.
For more information about targeted sentiment, see Targeted sentiment.
+Contains the sentiment and sentiment score for one mention of an entity.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
*/ @interface AWSComprehendMentionSentiment : AWSModel @@ -5364,7 +5411,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { /** -ID for the Amazon Web Services Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
KMS Key Alias: "alias/ExampleAlias"
ARN of a KMS Key Alias: "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"
ID for the Amazon Web Services Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. Specify the Key Id of a symmetric key, because you cannot use an asymmetric key for uploading data to S3.
The KmsKeyId can be one of the following formats:
KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
Amazon Resource Name (ARN) of a KMS Key: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
KMS Key Alias: "alias/ExampleAlias"
ARN of a KMS Key Alias: "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"
The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. For more information, see Role-based permissions.
+The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. For more information, see Role-based permissions.
*/ @property (nonatomic, strong) NSString * _Nullable dataAccessRoleArn; @@ -6939,7 +6986,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end /** -Information about one of the entities found by targeted sentiment analysis.
For more information about targeted sentiment, see Targeted sentiment.
+Information about one of the entities found by targeted sentiment analysis.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
*/ @interface AWSComprehendTargetedSentimentEntity : AWSModel @@ -6957,7 +7004,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end /** -Information about one mention of an entity. The mention information includes the location of the mention in the text and the sentiment of the mention.
For more information about targeted sentiment, see Targeted sentiment.
+Information about one mention of an entity. The mention information includes the location of the mention in the text and the sentiment of the mention.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
*/ @interface AWSComprehendTargetedSentimentMention : AWSModel @@ -7000,14 +7047,14 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end /** -Configuration about the custom classifier associated with the flywheel.
+Configuration about the model associated with a flywheel.
Required parameters: [LanguageCode] */ @interface AWSComprehendTaskConfig : AWSModel /** -Configuration required for a classification model.
+Configuration required for a document classification model.
*/ @property (nonatomic, strong) AWSComprehendDocumentClassificationConfig * _Nullable documentClassificationConfig; @@ -7023,6 +7070,20 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end +/** +One of the of text strings. Each string has a size limit of 1KB.
+ Required parameters: [Text] + */ +@interface AWSComprehendTextSegment : AWSModel + + +/** +The text content.
+ */ +@property (nonatomic, strong) NSString * _Nullable text; + +@end + /**Provides information for filtering topic detection jobs. For more information, see .
*/ @@ -7124,6 +7185,42 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end +/** +Toxic content analysis result for one string. For more information about toxicity detection, see Toxicity detection in the Amazon Comprehend Developer Guide
+ */ +@interface AWSComprehendToxicContent : AWSModel + + +/** +The name of the toxic content type.
+ */ +@property (nonatomic, assign) AWSComprehendToxicContentType name; + +/** +Model confidence in the detected content type. Value range is zero to one, where one is highest confidence.
+ */ +@property (nonatomic, strong) NSNumber * _Nullable score; + +@end + +/** +Toxicity analysis result for one string. For more information about toxicity detection, see Toxicity detection in the Amazon Comprehend Developer Guide.
+ */ +@interface AWSComprehendToxicLabels : AWSModel + + +/** +Array of toxic content types identified in the string.
+ */ +@property (nonatomic, strong) NSArrayOverall toxicity score for the string. Value range is zero to one, where one is the highest confidence.
+ */ +@property (nonatomic, strong) NSNumber * _Nullable toxicity; + +@end + /** */ @@ -7280,7 +7377,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendTargetedSentimentEntityType) { @end /** -The system identified one of the following warnings while processing the input document:
The document to classify is plain text, but the classifier is a native model.
The document to classify is semi-structured, but the classifier is a plain-text model.
The system identified one of the following warnings while processing the input document:
The document to classify is plain text, but the classifier is a native document model.
The document to classify is semi-structured, but the classifier is a plain-text model.
Inspects a batch of documents and returns a sentiment analysis for each entity identified in the documents.
For more information about targeted sentiment, see Targeted sentiment.
\"\ + \"documentation\":\"Inspects a batch of documents and returns a sentiment analysis for each entity identified in the documents.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
\"\ },\ \"ClassifyDocument\":{\ \"name\":\"ClassifyDocument\",\ @@ -186,7 +186,7 @@ - (NSString *)definitionString { {\"shape\":\"TextSizeLimitExceededException\"},\ {\"shape\":\"InternalServerException\"}\ ],\ - \"documentation\":\"Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.
You can input plain text or you can upload a single-page input document (text, PDF, Word, or image).
If the system detects errors while processing a page in the input document, the API response includes an entry in Errors
that describes the errors.
If the system detects a document-level error in your input document, the API returns an InvalidRequestException
error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
Creates a classification request to analyze a single document in real-time. ClassifyDocument
supports the following model types:
Custom classifier - a custom model that you have created and trained. For input, you can provide plain text, a single-page document (PDF, Word, or image), or Amazon Textract API output. For more information, see Custom classification in the Amazon Comprehend Developer Guide.
Prompt safety classifier - Amazon Comprehend provides a pre-trained model for classifying input prompts for generative AI applications. For input, you provide English plain text input. For prompt safety classification, the response includes only the Classes
field. For more information about prompt safety classifiers, see Prompt safety classification in the Amazon Comprehend Developer Guide.
If the system detects errors while processing a page in the input document, the API response includes an Errors
field that describes the errors.
If the system detects a document-level error in your input document, the API returns an InvalidRequestException
error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
Inspects the input text and returns a sentiment analysis for each entity identified in the text.
For more information about targeted sentiment, see Targeted sentiment.
\"\ + \"documentation\":\"Inspects the input text and returns a sentiment analysis for each entity identified in the text.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
\"\ + },\ + \"DetectToxicContent\":{\ + \"name\":\"DetectToxicContent\",\ + \"http\":{\ + \"method\":\"POST\",\ + \"requestUri\":\"/\"\ + },\ + \"input\":{\"shape\":\"DetectToxicContentRequest\"},\ + \"output\":{\"shape\":\"DetectToxicContentResponse\"},\ + \"errors\":[\ + {\"shape\":\"InvalidRequestException\"},\ + {\"shape\":\"TextSizeLimitExceededException\"},\ + {\"shape\":\"UnsupportedLanguageException\"},\ + {\"shape\":\"InternalServerException\"}\ + ],\ + \"documentation\":\"Performs toxicity analysis on the list of text strings that you provide as input. The API response contains a results list that matches the size of the input list. For more information about toxicity detection, see Toxicity detection in the Amazon Comprehend Developer Guide.
\"\ },\ \"ImportModel\":{\ \"name\":\"ImportModel\",\ @@ -1098,7 +1114,7 @@ - (NSString *)definitionString { {\"shape\":\"ResourceInUseException\"},\ {\"shape\":\"InternalServerException\"}\ ],\ - \"documentation\":\"Starts an asynchronous document classification job. Use the DescribeDocumentClassificationJob
operation to track the progress of the job.
Starts an asynchronous document classification job using a custom classification model. Use the DescribeDocumentClassificationJob
operation to track the progress of the job.
The Amazon Resource Number (ARN) of the endpoint. For information about endpoints, see Managing endpoints.
\"\ + \"documentation\":\"The Amazon Resource Number (ARN) of the endpoint.
For prompt safety classification, Amazon Comprehend provides the endpoint ARN. For more information about prompt safety classifiers, see Prompt safety classification in the Amazon Comprehend Developer Guide
For custom classification, you create an endpoint for your custom model. For more information, see Using Amazon Comprehend endpoints.
\"\ },\ \"Bytes\":{\ \"shape\":\"SemiStructuredDocumentBlob\",\ - \"documentation\":\"Use the Bytes
parameter to input a text, PDF, Word or image file. You can also use the Bytes
parameter to input an Amazon Textract DetectDocumentText
or AnalyzeDocument
output file.
Provide the input document as a sequence of base64-encoded bytes. If your code uses an Amazon Web Services SDK to classify documents, the SDK may encode the document file bytes for you.
The maximum length of this field depends on the input document type. For details, see Inputs for real-time custom analysis in the Comprehend Developer Guide.
If you use the Bytes
parameter, do not use the Text
parameter.
Use the Bytes
parameter to input a text, PDF, Word or image file.
When you classify a document using a custom model, you can also use the Bytes
parameter to input an Amazon Textract DetectDocumentText
or AnalyzeDocument
output file.
To classify a document using the prompt safety classifier, use the Text
parameter for input.
Provide the input document as a sequence of base64-encoded bytes. If your code uses an Amazon Web Services SDK to classify documents, the SDK may encode the document file bytes for you.
The maximum length of this field depends on the input document type. For details, see Inputs for real-time custom analysis in the Comprehend Developer Guide.
If you use the Bytes
parameter, do not use the Text
parameter.
The classes used by the document being analyzed. These are used for multi-class trained models. Individual classes are mutually exclusive and each document is expected to have only a single class assigned to it. For example, an animal can be a dog or a cat, but not both at the same time.
\"\ + \"documentation\":\"The classes used by the document being analyzed. These are used for models trained in multi-class mode. Individual classes are mutually exclusive and each document is expected to have only a single class assigned to it. For example, an animal can be a dog or a cat, but not both at the same time.
For prompt safety classification, the response includes only two classes (SAFE_PROMPT and UNSAFE_PROMPT), along with a confidence score for each class. The value range of the score is zero to one, where one is the highest confidence.
\"\ },\ \"Labels\":{\ \"shape\":\"ListOfLabels\",\ - \"documentation\":\"The labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
\"\ + \"documentation\":\"The labels used in the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
\"\ },\ \"DocumentMetadata\":{\ \"shape\":\"DocumentMetadata\",\ @@ -2211,7 +2227,7 @@ - (NSString *)definitionString { },\ \"OutputDataConfig\":{\ \"shape\":\"DocumentClassifierOutputDataConfig\",\ - \"documentation\":\"Specifies the location for the output files from a custom classifier job. This parameter is required for a request that creates a native classifier model.
\"\ + \"documentation\":\"Specifies the location for the output files from a custom classifier job. This parameter is required for a request that creates a native document model.
\"\ },\ \"ClientRequestToken\":{\ \"shape\":\"ClientRequestTokenString\",\ @@ -2232,7 +2248,7 @@ - (NSString *)definitionString { },\ \"Mode\":{\ \"shape\":\"DocumentClassifierMode\",\ - \"documentation\":\"Indicates the mode in which the classifier will be trained. The classifier can be trained in multi-class mode, which identifies one and only one class for each document, or multi-label mode, which identifies one or more labels for each document. In multi-label mode, multiple labels for an individual document are separated by a delimiter. The default delimiter between labels is a pipe (|).
\"\ + \"documentation\":\"Indicates the mode in which the classifier will be trained. The classifier can be trained in multi-class (single-label) mode or multi-label mode. Multi-class mode identifies a single class label for each document and multi-label mode identifies one or more class labels for each document. Multiple labels for an individual document are separated by a delimiter. The default delimiter between labels is a pipe (|).
\"\ },\ \"ModelKmsKeyId\":{\ \"shape\":\"KmsKeyId\",\ @@ -2383,7 +2399,7 @@ - (NSString *)definitionString { },\ \"ActiveModelArn\":{\ \"shape\":\"ComprehendModelArn\",\ - \"documentation\":\"To associate an existing model with the flywheel, specify the Amazon Resource Number (ARN) of the model version.
\"\ + \"documentation\":\"To associate an existing model with the flywheel, specify the Amazon Resource Number (ARN) of the model version. Do not set TaskConfig
or ModelType
if you specify an ActiveModelArn
.
Configuration about the custom classifier associated with the flywheel.
\"\ + \"documentation\":\"Configuration about the model associated with the flywheel. You need to set TaskConfig
if you are creating a flywheel for a new model.
The model type.
\"\ + \"documentation\":\"The model type. You need to set ModelType
if you are creating a flywheel for a new model.
A list of up to 10 text strings. Each string has a maximum size of 1 KB, and the maximum size of the list is 10 KB.
\"\ + },\ + \"LanguageCode\":{\ + \"shape\":\"LanguageCode\",\ + \"documentation\":\"The language of the input text. Currently, English is the only supported language.
\"\ + }\ + }\ + },\ + \"DetectToxicContentResponse\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"ResultList\":{\ + \"shape\":\"ListOfToxicLabels\",\ + \"documentation\":\"Results of the content moderation analysis. Each entry in the results list contains a list of toxic content types identified in the text, along with a confidence score for each content type. The results list also includes a toxicity score for each entry in the results list.
\"\ + }\ + }\ + },\ \"DocumentClass\":{\ \"type\":\"structure\",\ \"members\":{\ @@ -3324,7 +3366,7 @@ - (NSString *)definitionString { \"documentation\":\"One or more labels to associate with the custom classifier.
\"\ }\ },\ - \"documentation\":\"Configuration required for a custom classification model.
\"\ + \"documentation\":\"Configuration required for a document classification model.
\"\ },\ \"DocumentClassificationJobFilter\":{\ \"type\":\"structure\",\ @@ -3401,7 +3443,7 @@ - (NSString *)definitionString { },\ \"VpcConfig\":{\ \"shape\":\"VpcConfig\",\ - \"documentation\":\"Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your document classification job. For more information, see Amazon VPC.
\"\ + \"documentation\":\"Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your document classification job. For more information, see Amazon VPC.
\"\ },\ \"FlywheelArn\":{\ \"shape\":\"ComprehendFlywheelArn\",\ @@ -3450,12 +3492,12 @@ - (NSString *)definitionString { \"documentation\":\"The S3 URI location of the test documents included in the TestS3Uri CSV file. This field is not required if you do not specify a test CSV file.
\"\ }\ },\ - \"documentation\":\"The location of the training documents. This parameter is required in a request to create a native classifier model.
\"\ + \"documentation\":\"The location of the training documents. This parameter is required in a request to create a semi-structured document classification model.
\"\ },\ \"DocumentClassifierEndpointArn\":{\ \"type\":\"string\",\ \"max\":256,\ - \"pattern\":\"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*\"\ + \"pattern\":\"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:([0-9]{12}|aws):document-classifier-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*\"\ },\ \"DocumentClassifierFilter\":{\ \"type\":\"structure\",\ @@ -3492,7 +3534,7 @@ - (NSString *)definitionString { },\ \"TestS3Uri\":{\ \"shape\":\"S3Uri\",\ - \"documentation\":\"This specifies the Amazon S3 location where the test annotations for an entity recognizer are located. The URI must be in the same Amazon Web Services Region as the API endpoint that you are calling.
\"\ + \"documentation\":\"This specifies the Amazon S3 location that contains the test annotations for the document classifier. The URI must be in the same Amazon Web Services Region as the API endpoint that you are calling.
\"\ },\ \"LabelDelimiter\":{\ \"shape\":\"LabelDelimiter\",\ @@ -3504,11 +3546,11 @@ - (NSString *)definitionString { },\ \"DocumentType\":{\ \"shape\":\"DocumentClassifierDocumentTypeFormat\",\ - \"documentation\":\"The type of input documents for training the model. Provide plain-text documents to create a plain-text model, and provide semi-structured documents to create a native model.
\"\ + \"documentation\":\"The type of input documents for training the model. Provide plain-text documents to create a plain-text model, and provide semi-structured documents to create a native document model.
\"\ },\ \"Documents\":{\ \"shape\":\"DocumentClassifierDocuments\",\ - \"documentation\":\"The S3 location of the training documents. This parameter is required in a request to create a native classifier model.
\"\ + \"documentation\":\"The S3 location of the training documents. This parameter is required in a request to create a native document model.
\"\ },\ \"DocumentReaderConfig\":{\"shape\":\"DocumentReaderConfig\"}\ },\ @@ -3537,7 +3579,7 @@ - (NSString *)definitionString { \"documentation\":\"The Amazon S3 prefix for the data lake location of the flywheel statistics.
\"\ }\ },\ - \"documentation\":\"Provide the location for output data from a custom classifier job. This field is mandatory if you are training a native classifier model.
\"\ + \"documentation\":\"Provide the location for output data from a custom classifier job. This field is mandatory if you are training a native document model.
\"\ },\ \"DocumentClassifierProperties\":{\ \"type\":\"structure\",\ @@ -3596,7 +3638,7 @@ - (NSString *)definitionString { },\ \"VpcConfig\":{\ \"shape\":\"VpcConfig\",\ - \"documentation\":\"Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.
\"\ + \"documentation\":\"Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.
\"\ },\ \"Mode\":{\ \"shape\":\"DocumentClassifierMode\",\ @@ -3696,7 +3738,7 @@ - (NSString *)definitionString { },\ \"DocumentReadFeatureTypes\":{\ \"type\":\"string\",\ - \"documentation\":\"Specifies the type of Amazon Textract features to apply. If you chose TEXTRACT_ANALYZE_DOCUMENT
as the read action, you must specify one or both of the following values:
TABLES
- Returns additional information about any tables that are detected in the input document.
FORMS
- Returns additional information about any forms that are detected in the input document.
TABLES or FORMS
\",\ \"enum\":[\ \"TABLES\",\ \"FORMS\"\ @@ -3723,7 +3765,7 @@ - (NSString *)definitionString { },\ \"FeatureTypes\":{\ \"shape\":\"ListOfDocumentReadFeatureTypes\",\ - \"documentation\":\"Specifies the type of Amazon Textract features to apply. If you chose TEXTRACT_ANALYZE_DOCUMENT
as the read action, you must specify one or both of the following values:
TABLES
- Returns information about any tables that are detected in the input document.
FORMS
- Returns information and the data from any forms that are detected in the input document.
Specifies the type of Amazon Textract features to apply. If you chose TEXTRACT_ANALYZE_DOCUMENT
as the read action, you must specify one or both of the following values:
TABLES
- Returns additional information about any tables that are detected in the input document.
FORMS
- Returns additional information about any forms that are detected in the input document.
Provides configuration parameters to override the default actions for extracting text from PDF documents and image files.
By default, Amazon Comprehend performs the following actions to extract text from files, based on the input file type:
Word files - Amazon Comprehend parser extracts the text.
Digital PDF files - Amazon Comprehend parser extracts the text.
Image files and scanned PDF files - Amazon Comprehend uses the Amazon Textract DetectDocumentText
API to extract the text.
DocumentReaderConfig
does not apply to plain text files or Word files.
For image files and PDF documents, you can override these default actions using the fields listed below. For more information, see Setting text extraction options in the Comprehend Developer Guide.
\"\ @@ -4437,7 +4479,7 @@ - (NSString *)definitionString { \"members\":{\ \"Type\":{\ \"shape\":\"EntityTypeName\",\ - \"documentation\":\"An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.
Entity types must not contain the following invalid characters: \\\\n (line break), \\\\\\\\n (escaped line break, \\\\r (carriage return), \\\\\\\\r (escaped carriage return), \\\\t (tab), \\\\\\\\t (escaped tab), space, and , (comma).
\"\ + \"documentation\":\"An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.
Entity types must not contain the following invalid characters: \\\\n (line break), \\\\\\\\n (escaped line break, \\\\r (carriage return), \\\\\\\\r (escaped carriage return), \\\\t (tab), \\\\\\\\t (escaped tab), and , (comma).
\"\ }\ },\ \"documentation\":\"An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.
\"\ @@ -4699,7 +4741,7 @@ - (NSString *)definitionString { },\ \"TaskConfig\":{\ \"shape\":\"TaskConfig\",\ - \"documentation\":\"Configuration about the custom classifier associated with the flywheel.
\"\ + \"documentation\":\"Configuration about the model associated with a flywheel.
\"\ },\ \"DataLakeS3Uri\":{\ \"shape\":\"S3Uri\",\ @@ -4909,10 +4951,10 @@ - (NSString *)definitionString { \"members\":{\ \"Reason\":{\ \"shape\":\"InvalidRequestDetailReason\",\ - \"documentation\":\"Reason code is INVALID_DOCUMENT
.
Reason codes include the following values:
DOCUMENT_SIZE_EXCEEDED - Document size is too large. Check the size of your file and resubmit the request.
UNSUPPORTED_DOC_TYPE - Document type is not supported. Check the file type and resubmit the request.
PAGE_LIMIT_EXCEEDED - Too many pages in the document. Check the number of pages in your file and resubmit the request.
TEXTRACT_ACCESS_DENIED - Access denied to Amazon Textract. Verify that your account has permission to use Amazon Textract API operations and resubmit the request.
NOT_TEXTRACT_JSON - Document is not Amazon Textract JSON format. Verify the format and resubmit the request.
MISMATCHED_TOTAL_PAGE_COUNT - Check the number of pages in your file and resubmit the request.
INVALID_DOCUMENT - Invalid document. Check the file and resubmit the request.
Provides additional detail about why the request failed:
Document size is too large - Check the size of your file and resubmit the request.
Document type is not supported - Check the file type and resubmit the request.
Too many pages in the document - Check the number of pages in your file and resubmit the request.
Access denied to Amazon Textract - Verify that your account has permission to use Amazon Textract API operations and resubmit the request.
Provides additional detail about why the request failed.
\"\ },\ \"InvalidRequestDetailReason\":{\ \"type\":\"string\",\ @@ -5617,6 +5659,20 @@ - (NSString *)definitionString { \"type\":\"list\",\ \"member\":{\"shape\":\"TargetedSentimentEntity\"}\ },\ + \"ListOfTextSegments\":{\ + \"type\":\"list\",\ + \"member\":{\"shape\":\"TextSegment\"},\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"ListOfToxicContent\":{\ + \"type\":\"list\",\ + \"member\":{\"shape\":\"ToxicContent\"}\ + },\ + \"ListOfToxicLabels\":{\ + \"type\":\"list\",\ + \"member\":{\"shape\":\"ToxicLabels\"}\ + },\ \"ListOfWarnings\":{\ \"type\":\"list\",\ \"member\":{\"shape\":\"WarningsListItem\"}\ @@ -5784,7 +5840,7 @@ - (NSString *)definitionString { },\ \"SentimentScore\":{\"shape\":\"SentimentScore\"}\ },\ - \"documentation\":\"Contains the sentiment and sentiment score for one mention of an entity.
For more information about targeted sentiment, see Targeted sentiment.
\"\ + \"documentation\":\"Contains the sentiment and sentiment score for one mention of an entity.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
\"\ },\ \"ModelStatus\":{\ \"type\":\"string\",\ @@ -5822,7 +5878,7 @@ - (NSString *)definitionString { },\ \"KmsKeyId\":{\ \"shape\":\"KmsKeyId\",\ - \"documentation\":\"ID for the Amazon Web Services Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
KMS Key ID: \\\"1234abcd-12ab-34cd-56ef-1234567890ab\\\"
Amazon Resource Name (ARN) of a KMS Key: \\\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\\\"
KMS Key Alias: \\\"alias/ExampleAlias\\\"
ARN of a KMS Key Alias: \\\"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\\\"
ID for the Amazon Web Services Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. Specify the Key Id of a symmetric key, because you cannot use an asymmetric key for uploading data to S3.
The KmsKeyId can be one of the following formats:
KMS Key ID: \\\"1234abcd-12ab-34cd-56ef-1234567890ab\\\"
Amazon Resource Name (ARN) of a KMS Key: \\\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\\\"
KMS Key Alias: \\\"alias/ExampleAlias\\\"
ARN of a KMS Key Alias: \\\"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\\\"
Provides configuration parameters for the output of inference jobs.
\"\ @@ -6838,7 +6894,7 @@ - (NSString *)definitionString { },\ \"DataAccessRoleArn\":{\ \"shape\":\"IamRoleArn\",\ - \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. For more information, see Role-based permissions.
\"\ + \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. For more information, see Role-based permissions.
\"\ },\ \"JobName\":{\ \"shape\":\"JobName\",\ @@ -7340,7 +7396,7 @@ - (NSString *)definitionString { \"documentation\":\"An array of mentions of the entity in the document. The array represents a co-reference group. See Co-reference group for an example.
\"\ }\ },\ - \"documentation\":\"Information about one of the entities found by targeted sentiment analysis.
For more information about targeted sentiment, see Targeted sentiment.
\"\ + \"documentation\":\"Information about one of the entities found by targeted sentiment analysis.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
\"\ },\ \"TargetedSentimentEntityType\":{\ \"type\":\"string\",\ @@ -7396,7 +7452,7 @@ - (NSString *)definitionString { \"documentation\":\"The offset into the document text where the mention ends.
\"\ }\ },\ - \"documentation\":\"Information about one mention of an entity. The mention information includes the location of the mention in the text and the sentiment of the mention.
For more information about targeted sentiment, see Targeted sentiment.
\"\ + \"documentation\":\"Information about one mention of an entity. The mention information includes the location of the mention in the text and the sentiment of the mention.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
\"\ },\ \"TaskConfig\":{\ \"type\":\"structure\",\ @@ -7408,14 +7464,25 @@ - (NSString *)definitionString { },\ \"DocumentClassificationConfig\":{\ \"shape\":\"DocumentClassificationConfig\",\ - \"documentation\":\"Configuration required for a classification model.
\"\ + \"documentation\":\"Configuration required for a document classification model.
\"\ },\ \"EntityRecognitionConfig\":{\ \"shape\":\"EntityRecognitionConfig\",\ \"documentation\":\"Configuration required for an entity recognition model.
\"\ }\ },\ - \"documentation\":\"Configuration about the custom classifier associated with the flywheel.
\"\ + \"documentation\":\"Configuration about the model associated with a flywheel.
\"\ + },\ + \"TextSegment\":{\ + \"type\":\"structure\",\ + \"required\":[\"Text\"],\ + \"members\":{\ + \"Text\":{\ + \"shape\":\"CustomerInputString\",\ + \"documentation\":\"The text content.
\"\ + }\ + },\ + \"documentation\":\"One of the of text strings. Each string has a size limit of 1KB.
\"\ },\ \"TextSizeLimitExceededException\":{\ \"type\":\"structure\",\ @@ -7534,12 +7601,52 @@ - (NSString *)definitionString { \"type\":\"list\",\ \"member\":{\"shape\":\"TopicsDetectionJobProperties\"}\ },\ + \"ToxicContent\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"Name\":{\ + \"shape\":\"ToxicContentType\",\ + \"documentation\":\"The name of the toxic content type.
\"\ + },\ + \"Score\":{\ + \"shape\":\"Float\",\ + \"documentation\":\"Model confidence in the detected content type. Value range is zero to one, where one is highest confidence.
\"\ + }\ + },\ + \"documentation\":\"Toxic content analysis result for one string. For more information about toxicity detection, see Toxicity detection in the Amazon Comprehend Developer Guide
\"\ + },\ + \"ToxicContentType\":{\ + \"type\":\"string\",\ + \"enum\":[\ + \"GRAPHIC\",\ + \"HARASSMENT_OR_ABUSE\",\ + \"HATE_SPEECH\",\ + \"INSULT\",\ + \"PROFANITY\",\ + \"SEXUAL\",\ + \"VIOLENCE_OR_THREAT\"\ + ]\ + },\ + \"ToxicLabels\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"Labels\":{\ + \"shape\":\"ListOfToxicContent\",\ + \"documentation\":\"Array of toxic content types identified in the string.
\"\ + },\ + \"Toxicity\":{\ + \"shape\":\"Float\",\ + \"documentation\":\"Overall toxicity score for the string. Value range is zero to one, where one is the highest confidence.
\"\ + }\ + },\ + \"documentation\":\"Toxicity analysis result for one string. For more information about toxicity detection, see Toxicity detection in the Amazon Comprehend Developer Guide.
\"\ + },\ \"UnsupportedLanguageException\":{\ \"type\":\"structure\",\ \"members\":{\ \"Message\":{\"shape\":\"String\"}\ },\ - \"documentation\":\"Amazon Comprehend can't process the language of the input text. For custom entity recognition APIs, only English, Spanish, French, Italian, German, or Portuguese are accepted. For a list of supported languages, Supported languages in the Comprehend Developer Guide.
\",\ + \"documentation\":\"Amazon Comprehend can't process the language of the input text. For a list of supported languages, Supported languages in the Comprehend Developer Guide.
\",\ \"exception\":true\ },\ \"UntagResourceRequest\":{\ @@ -7684,7 +7791,7 @@ - (NSString *)definitionString { \"documentation\":\"Text message associated with the warning.
\"\ }\ },\ - \"documentation\":\"The system identified one of the following warnings while processing the input document:
The document to classify is plain text, but the classifier is a native model.
The document to classify is semi-structured, but the classifier is a plain-text model.
The system identified one of the following warnings while processing the input document:
The document to classify is plain text, but the classifier is a native document model.
The document to classify is semi-structured, but the classifier is a plain-text model.
Amazon Comprehend is an Amazon Web Services service for gaining insight into the content of documents. Use these actions to determine the topics contained in your documents, the topics they discuss, the predominant sentiment expressed in them, the predominant language used, and more.
\"\ diff --git a/AWSComprehend/AWSComprehendService.h b/AWSComprehend/AWSComprehendService.h index 146217f7da7..09283d5ce84 100644 --- a/AWSComprehend/AWSComprehendService.h +++ b/AWSComprehend/AWSComprehendService.h @@ -300,7 +300,7 @@ FOUNDATION_EXPORT NSString *const AWSComprehendSDKVersion; - (void)batchDetectSyntax:(AWSComprehendBatchDetectSyntaxRequest *)request completionHandler:(void (^ _Nullable)(AWSComprehendBatchDetectSyntaxResponse * _Nullable response, NSError * _Nullable error))completionHandler; /** -Inspects a batch of documents and returns a sentiment analysis for each entity identified in the documents.
For more information about targeted sentiment, see Targeted sentiment.
+Inspects a batch of documents and returns a sentiment analysis for each entity identified in the documents.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
@param request A container for the necessary parameters to execute the BatchDetectTargetedSentiment service method. @@ -312,7 +312,7 @@ FOUNDATION_EXPORT NSString *const AWSComprehendSDKVersion; - (AWSTaskInspects a batch of documents and returns a sentiment analysis for each entity identified in the documents.
For more information about targeted sentiment, see Targeted sentiment.
+Inspects a batch of documents and returns a sentiment analysis for each entity identified in the documents.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
@param request A container for the necessary parameters to execute the BatchDetectTargetedSentiment service method. @param completionHandler The completion handler to call when the load request is complete. @@ -325,7 +325,7 @@ FOUNDATION_EXPORT NSString *const AWSComprehendSDKVersion; - (void)batchDetectTargetedSentiment:(AWSComprehendBatchDetectTargetedSentimentRequest *)request completionHandler:(void (^ _Nullable)(AWSComprehendBatchDetectTargetedSentimentResponse * _Nullable response, NSError * _Nullable error))completionHandler; /** -Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.
You can input plain text or you can upload a single-page input document (text, PDF, Word, or image).
If the system detects errors while processing a page in the input document, the API response includes an entry in Errors
that describes the errors.
If the system detects a document-level error in your input document, the API returns an InvalidRequestException
error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
Creates a classification request to analyze a single document in real-time. ClassifyDocument
supports the following model types:
Custom classifier - a custom model that you have created and trained. For input, you can provide plain text, a single-page document (PDF, Word, or image), or Amazon Textract API output. For more information, see Custom classification in the Amazon Comprehend Developer Guide.
Prompt safety classifier - Amazon Comprehend provides a pre-trained model for classifying input prompts for generative AI applications. For input, you provide English plain text input. For prompt safety classification, the response includes only the Classes
field. For more information about prompt safety classifiers, see Prompt safety classification in the Amazon Comprehend Developer Guide.
If the system detects errors while processing a page in the input document, the API response includes an Errors
field that describes the errors.
If the system detects a document-level error in your input document, the API returns an InvalidRequestException
error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.
You can input plain text or you can upload a single-page input document (text, PDF, Word, or image).
If the system detects errors while processing a page in the input document, the API response includes an entry in Errors
that describes the errors.
If the system detects a document-level error in your input document, the API returns an InvalidRequestException
error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
Creates a classification request to analyze a single document in real-time. ClassifyDocument
supports the following model types:
Custom classifier - a custom model that you have created and trained. For input, you can provide plain text, a single-page document (PDF, Word, or image), or Amazon Textract API output. For more information, see Custom classification in the Amazon Comprehend Developer Guide.
Prompt safety classifier - Amazon Comprehend provides a pre-trained model for classifying input prompts for generative AI applications. For input, you provide English plain text input. For prompt safety classification, the response includes only the Classes
field. For more information about prompt safety classifiers, see Prompt safety classification in the Amazon Comprehend Developer Guide.
If the system detects errors while processing a page in the input document, the API response includes an Errors
field that describes the errors.
If the system detects a document-level error in your input document, the API returns an InvalidRequestException
error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
Inspects the input text and returns a sentiment analysis for each entity identified in the text.
For more information about targeted sentiment, see Targeted sentiment.
+Inspects the input text and returns a sentiment analysis for each entity identified in the text.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
@param request A container for the necessary parameters to execute the DetectTargetedSentiment service method. @@ -1187,7 +1187,7 @@ FOUNDATION_EXPORT NSString *const AWSComprehendSDKVersion; - (AWSTaskInspects the input text and returns a sentiment analysis for each entity identified in the text.
For more information about targeted sentiment, see Targeted sentiment.
+Inspects the input text and returns a sentiment analysis for each entity identified in the text.
For more information about targeted sentiment, see Targeted sentiment in the Amazon Comprehend Developer Guide.
@param request A container for the necessary parameters to execute the DetectTargetedSentiment service method. @param completionHandler The completion handler to call when the load request is complete. @@ -1199,6 +1199,31 @@ FOUNDATION_EXPORT NSString *const AWSComprehendSDKVersion; */ - (void)detectTargetedSentiment:(AWSComprehendDetectTargetedSentimentRequest *)request completionHandler:(void (^ _Nullable)(AWSComprehendDetectTargetedSentimentResponse * _Nullable response, NSError * _Nullable error))completionHandler; +/** +Performs toxicity analysis on the list of text strings that you provide as input. The API response contains a results list that matches the size of the input list. For more information about toxicity detection, see Toxicity detection in the Amazon Comprehend Developer Guide.
+ + @param request A container for the necessary parameters to execute the DetectToxicContent service method. + + @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSComprehendDetectToxicContentResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSComprehendErrorDomain` domain and the following error code: `AWSComprehendErrorInvalidRequest`, `AWSComprehendErrorTextSizeLimitExceeded`, `AWSComprehendErrorUnsupportedLanguage`, `AWSComprehendErrorInternalServer`. + + @see AWSComprehendDetectToxicContentRequest + @see AWSComprehendDetectToxicContentResponse + */ +- (AWSTaskPerforms toxicity analysis on the list of text strings that you provide as input. The API response contains a results list that matches the size of the input list. For more information about toxicity detection, see Toxicity detection in the Amazon Comprehend Developer Guide.
+ + @param request A container for the necessary parameters to execute the DetectToxicContent service method. + @param completionHandler The completion handler to call when the load request is complete. + `response` - A response object, or `nil` if the request failed. + `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSComprehendErrorDomain` domain and the following error code: `AWSComprehendErrorInvalidRequest`, `AWSComprehendErrorTextSizeLimitExceeded`, `AWSComprehendErrorUnsupportedLanguage`, `AWSComprehendErrorInternalServer`. + + @see AWSComprehendDetectToxicContentRequest + @see AWSComprehendDetectToxicContentResponse + */ +- (void)detectToxicContent:(AWSComprehendDetectToxicContentRequest *)request completionHandler:(void (^ _Nullable)(AWSComprehendDetectToxicContentResponse * _Nullable response, NSError * _Nullable error))completionHandler; + /**Creates a new custom model that replicates a source custom model that you import. The source model can be in your Amazon Web Services account or another one.
If the source model is in another Amazon Web Services account, then it must have a resource-based policy that authorizes you to import it.
The source model must be in the same Amazon Web Services Region that you're using when you import. You can't import a model that's in a different Region.
@@ -1700,7 +1725,7 @@ FOUNDATION_EXPORT NSString *const AWSComprehendSDKVersion; - (void)putResourcePolicy:(AWSComprehendPutResourcePolicyRequest *)request completionHandler:(void (^ _Nullable)(AWSComprehendPutResourcePolicyResponse * _Nullable response, NSError * _Nullable error))completionHandler; /** -Starts an asynchronous document classification job. Use the DescribeDocumentClassificationJob
operation to track the progress of the job.
Starts an asynchronous document classification job using a custom classification model. Use the DescribeDocumentClassificationJob
operation to track the progress of the job.
Starts an asynchronous document classification job. Use the DescribeDocumentClassificationJob
operation to track the progress of the job.
Starts an asynchronous document classification job using a custom classification model. Use the DescribeDocumentClassificationJob
operation to track the progress of the job.