diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b762d181b6d..d04b23abbe0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,477 @@ +July 18th, 2025 +=============== +**Service Features:** +- `aws-sdk-auditmanager` (1.79.0): Updated error handling for the RegisterOrganizationAdminAccount API to properly translate TooManyExceptions to an HTTP 429 status code. This enhancement improves error handling consistency and provides clearer feedback when request limits are exceeded. +- `aws-sdk-cloudwatchlogs` (1.94.0): CloudWatch Logs launches the GetLogObject API with streaming support for efficient log data retrieval (see the usage sketch after the crate version table below). CloudWatch Logs also added support for the new AccountPolicy type METRIC_EXTRACTION_POLICY. For more information, see the CloudWatch Logs API documentation. +- `aws-sdk-mediaconvert` (1.96.0): This release adds support for TAMS server integration with MediaConvert inputs. +- `aws-sdk-outposts` (1.85.0): Adds an AWS Outposts API to surface customer billing information. +- `aws-sdk-sesv2` (1.88.0): Added IP Visibility support for managed dedicated pools. Enhanced the GetDedicatedIp and GetDedicatedIps APIs to return managed IP addresses. + +**Service Documentation:** +- `aws-sdk-ssm` (1.85.1): July 2025 doc-only updates for Systems Manager. + +**Crate Versions** +
+Click to expand to view crate versions... + +|Crate|Version| +|-|-| +|aws-config|1.8.2| +|aws-credential-types|1.2.4| +|aws-endpoint|0.60.3| +|aws-http|0.60.6| +|aws-hyper|0.60.3| +|aws-runtime|1.5.9| +|aws-runtime-api|1.1.8| +|aws-sdk-accessanalyzer|1.83.0| +|aws-sdk-account|1.79.0| +|aws-sdk-acm|1.79.0| +|aws-sdk-acmpca|1.81.0| +|aws-sdk-aiops|1.4.0| +|aws-sdk-amp|1.79.0| +|aws-sdk-amplify|1.85.0| +|aws-sdk-amplifybackend|1.76.0| +|aws-sdk-amplifyuibuilder|1.76.0| +|aws-sdk-apigateway|1.79.0| +|aws-sdk-apigatewaymanagement|1.76.0| +|aws-sdk-apigatewayv2|1.78.0| +|aws-sdk-appconfig|1.81.0| +|aws-sdk-appconfigdata|1.76.0| +|aws-sdk-appfabric|1.76.0| +|aws-sdk-appflow|1.76.0| +|aws-sdk-appintegrations|1.77.0| +|aws-sdk-applicationautoscaling|1.82.0| +|aws-sdk-applicationcostprofiler|1.76.0| +|aws-sdk-applicationdiscovery|1.78.0| +|aws-sdk-applicationinsights|1.77.0| +|aws-sdk-applicationsignals|1.52.0| +|aws-sdk-appmesh|1.76.0| +|aws-sdk-apprunner|1.77.0| +|aws-sdk-appstream|1.82.0| +|aws-sdk-appsync|1.89.0| +|aws-sdk-apptest|1.46.0| +|aws-sdk-arczonalshift|1.80.0| +|aws-sdk-artifact|1.66.0| +|aws-sdk-athena|1.81.0| +|aws-sdk-auditmanager|1.79.0| +|aws-sdk-autoscaling|1.86.0| +|aws-sdk-autoscalingplans|1.76.0| +|aws-sdk-b2bi|1.82.0| +|aws-sdk-backup|1.82.0| +|aws-sdk-backupgateway|1.76.0| +|aws-sdk-backupsearch|1.25.0| +|aws-sdk-batch|1.87.0| +|aws-sdk-bcmdataexports|1.74.0| +|aws-sdk-bcmpricingcalculator|1.31.0| +|aws-sdk-bedrock|1.104.0| +|aws-sdk-bedrockagent|1.105.0| +|aws-sdk-bedrockagentcore|1.0.0| +|aws-sdk-bedrockagentcorecontrol|1.0.0| +|aws-sdk-bedrockagentruntime|1.102.0| +|aws-sdk-bedrockdataautomation|1.28.0| +|aws-sdk-bedrockdataautomationruntime|1.27.0| +|aws-sdk-bedrockruntime|1.97.0| +|aws-sdk-billing|1.28.0| +|aws-sdk-billingconductor|1.77.0| +|aws-sdk-braket|1.78.0| +|aws-sdk-budgets|1.79.0| +|aws-sdk-chatbot|1.66.0| +|aws-sdk-chime|1.77.0| +|aws-sdk-chimesdkidentity|1.76.0| +|aws-sdk-chimesdkmediapipelines|1.78.0| +|aws-sdk-chimesdkmeetings|1.77.0| +|aws-sdk-chimesdkmessaging|1.76.0| +|aws-sdk-chimesdkvoice|1.79.0| +|aws-sdk-cleanrooms|1.87.0| +|aws-sdk-cleanroomsml|1.78.0| +|aws-sdk-cloud9|1.77.0| +|aws-sdk-cloudcontrol|1.77.0| +|aws-sdk-clouddirectory|1.76.0| +|aws-sdk-cloudformation|1.85.0| +|aws-sdk-cloudfront|1.83.1| +|aws-sdk-cloudfrontkeyvaluestore|1.76.0| +|aws-sdk-cloudhsm|1.76.0| +|aws-sdk-cloudhsmv2|1.79.0| +|aws-sdk-cloudsearch|1.76.0| +|aws-sdk-cloudsearchdomain|1.76.0| +|aws-sdk-cloudtrail|1.83.0| +|aws-sdk-cloudtraildata|1.76.0| +|aws-sdk-cloudwatch|1.82.0| +|aws-sdk-cloudwatchevents|1.76.0| +|aws-sdk-cloudwatchlogs|1.94.0| +|aws-sdk-codeartifact|1.79.0| +|aws-sdk-codebuild|1.99.0| +|aws-sdk-codecatalyst|1.77.0| +|aws-sdk-codecommit|1.77.0| +|aws-sdk-codeconnections|1.59.0| +|aws-sdk-codedeploy|1.78.0| +|aws-sdk-codeguruprofiler|1.76.0| +|aws-sdk-codegurureviewer|1.76.0| +|aws-sdk-codegurusecurity|1.77.0| +|aws-sdk-codepipeline|1.86.0| +|aws-sdk-codestarconnections|1.78.0| +|aws-sdk-codestarnotifications|1.76.0| +|aws-sdk-cognitoidentity|1.77.0| +|aws-sdk-cognitoidentityprovider|1.89.0| +|aws-sdk-cognitosync|1.76.0| +|aws-sdk-comprehend|1.76.0| +|aws-sdk-comprehendmedical|1.76.0| +|aws-sdk-computeoptimizer|1.80.0| +|aws-sdk-config|1.80.0| +|aws-sdk-connect|1.115.0| +|aws-sdk-connectcampaigns|1.78.0| +|aws-sdk-connectcampaignsv2|1.28.0| +|aws-sdk-connectcases|1.83.0| +|aws-sdk-connectcontactlens|1.78.0| +|aws-sdk-connectparticipant|1.78.0| +|aws-sdk-controlcatalog|1.61.0| +|aws-sdk-controltower|1.82.0| +|aws-sdk-costandusagereport|1.77.0| 
+|aws-sdk-costexplorer|1.85.0| +|aws-sdk-costoptimizationhub|1.81.0| +|aws-sdk-customerprofiles|1.83.0| +|aws-sdk-databasemigration|1.85.0| +|aws-sdk-databrew|1.76.0| +|aws-sdk-dataexchange|1.79.0| +|aws-sdk-datapipeline|1.76.0| +|aws-sdk-datasync|1.83.0| +|aws-sdk-datazone|1.92.0| +|aws-sdk-dax|1.76.0| +|aws-sdk-deadline|1.67.0| +|aws-sdk-detective|1.77.0| +|aws-sdk-devicefarm|1.79.0| +|aws-sdk-devopsguru|1.76.0| +|aws-sdk-directconnect|1.79.0| +|aws-sdk-directory|1.78.0| +|aws-sdk-directoryservicedata|1.33.0| +|aws-sdk-dlm|1.76.0| +|aws-sdk-docdb|1.80.0| +|aws-sdk-docdbelastic|1.78.0| +|aws-sdk-drs|1.79.0| +|aws-sdk-dsql|1.30.0| +|aws-sdk-dynamodb|1.84.0| +|aws-sdk-dynamodbstreams|1.76.0| +|aws-sdk-ebs|1.76.0| +|aws-sdk-ec2|1.148.0| +|aws-sdk-ec2instanceconnect|1.76.0| +|aws-sdk-ecr|1.84.0| +|aws-sdk-ecrpublic|1.79.0| +|aws-sdk-ecs|1.89.0| +|aws-sdk-efs|1.79.0| +|aws-sdk-eks|1.97.0| +|aws-sdk-eksauth|1.74.0| +|aws-sdk-elasticache|1.82.0| +|aws-sdk-elasticbeanstalk|1.77.0| +|aws-sdk-elasticloadbalancing|1.77.0| +|aws-sdk-elasticloadbalancingv2|1.86.0| +|aws-sdk-elasticsearch|1.79.0| +|aws-sdk-elastictranscoder|1.77.0| +|aws-sdk-emr|1.82.0| +|aws-sdk-emrcontainers|1.81.0| +|aws-sdk-emrserverless|1.84.0| +|aws-sdk-entityresolution|1.83.0| +|aws-sdk-eventbridge|1.83.0| +|aws-sdk-evidently|1.76.0| +|aws-sdk-evs|1.4.0| +|aws-sdk-finspace|1.80.0| +|aws-sdk-finspacedata|1.76.0| +|aws-sdk-firehose|1.84.0| +|aws-sdk-fis|1.79.0| +|aws-sdk-fms|1.81.0| +|aws-sdk-forecast|1.76.0| +|aws-sdk-forecastquery|1.76.0| +|aws-sdk-frauddetector|1.76.0| +|aws-sdk-freetier|1.75.0| +|aws-sdk-fsx|1.85.0| +|aws-sdk-gamelift|1.83.0| +|aws-sdk-gameliftstreams|1.16.0| +|aws-sdk-geomaps|1.30.0| +|aws-sdk-geoplaces|1.30.0| +|aws-sdk-georoutes|1.30.0| +|aws-sdk-glacier|1.76.0| +|aws-sdk-globalaccelerator|1.78.0| +|aws-sdk-glue|1.107.0| +|aws-sdk-grafana|1.78.0| +|aws-sdk-greengrass|1.76.0| +|aws-sdk-greengrassv2|1.77.0| +|aws-sdk-groundstation|1.78.0| +|aws-sdk-guardduty|1.88.0| +|aws-sdk-health|1.77.0| +|aws-sdk-healthlake|1.78.0| +|aws-sdk-iam|1.81.0| +|aws-sdk-identitystore|1.76.0| +|aws-sdk-imagebuilder|1.81.0| +|aws-sdk-inspector|1.76.0| +|aws-sdk-inspector2|1.84.0| +|aws-sdk-inspectorscan|1.76.0| +|aws-sdk-internetmonitor|1.81.0| +|aws-sdk-invoicing|1.27.0| +|aws-sdk-iot|1.86.0| +|aws-sdk-iotanalytics|1.76.0| +|aws-sdk-iotdataplane|1.76.0| +|aws-sdk-iotdeviceadvisor|1.77.0| +|aws-sdk-iotevents|1.77.0| +|aws-sdk-ioteventsdata|1.76.0| +|aws-sdk-iotfleethub|1.76.0| +|aws-sdk-iotfleetwise|1.83.0| +|aws-sdk-iotjobsdataplane|1.77.0| +|aws-sdk-iotmanagedintegrations|1.17.0| +|aws-sdk-iotsecuretunneling|1.77.0| +|aws-sdk-iotsitewise|1.80.0| +|aws-sdk-iotthingsgraph|1.76.0| +|aws-sdk-iottwinmaker|1.76.0| +|aws-sdk-iotwireless|1.81.0| +|aws-sdk-ivs|1.82.0| +|aws-sdk-ivschat|1.77.0| +|aws-sdk-ivsrealtime|1.85.0| +|aws-sdk-kafka|1.80.0| +|aws-sdk-kafkaconnect|1.78.0| +|aws-sdk-kendra|1.78.0| +|aws-sdk-kendraranking|1.76.0| +|aws-sdk-keyspaces|1.81.0| +|aws-sdk-keyspacesstreams|1.3.1| +|aws-sdk-kinesis|1.80.0| +|aws-sdk-kinesisanalytics|1.76.0| +|aws-sdk-kinesisanalyticsv2|1.80.0| +|aws-sdk-kinesisvideo|1.77.0| +|aws-sdk-kinesisvideoarchivedmedia|1.77.0| +|aws-sdk-kinesisvideomedia|1.76.0| +|aws-sdk-kinesisvideosignaling|1.76.0| +|aws-sdk-kinesisvideowebrtcstorage|1.77.0| +|aws-sdk-kms|1.79.0| +|aws-sdk-lakeformation|1.80.0| +|aws-sdk-lambda|1.88.0| +|aws-sdk-launchwizard|1.77.0| +|aws-sdk-lexmodelbuilding|1.76.0| +|aws-sdk-lexmodelsv2|1.82.0| +|aws-sdk-lexruntime|1.76.0| +|aws-sdk-lexruntimev2|1.76.0| 
+|aws-sdk-licensemanager|1.78.0| +|aws-sdk-licensemanagerlinuxsubscriptions|1.77.0| +|aws-sdk-licensemanagerusersubscriptions|1.77.0| +|aws-sdk-lightsail|1.80.0| +|aws-sdk-location|1.80.0| +|aws-sdk-lookoutequipment|1.78.0| +|aws-sdk-lookoutmetrics|1.76.0| +|aws-sdk-lookoutvision|1.76.0| +|aws-sdk-m2|1.80.0| +|aws-sdk-machinelearning|1.76.0| +|aws-sdk-macie2|1.79.0| +|aws-sdk-mailmanager|1.59.0| +|aws-sdk-managedblockchain|1.76.0| +|aws-sdk-managedblockchainquery|1.79.0| +|aws-sdk-marketplaceagreement|1.73.0| +|aws-sdk-marketplacecatalog|1.81.0| +|aws-sdk-marketplacecommerceanalytics|1.76.0| +|aws-sdk-marketplacedeployment|1.73.0| +|aws-sdk-marketplaceentitlement|1.80.0| +|aws-sdk-marketplacemetering|1.78.0| +|aws-sdk-marketplacereporting|1.32.0| +|aws-sdk-mediaconnect|1.81.0| +|aws-sdk-mediaconvert|1.96.0| +|aws-sdk-medialive|1.98.0| +|aws-sdk-mediapackage|1.76.0| +|aws-sdk-mediapackagev2|1.85.0| +|aws-sdk-mediapackagevod|1.76.0| +|aws-sdk-mediastore|1.76.0| +|aws-sdk-mediastoredata|1.76.0| +|aws-sdk-mediatailor|1.83.0| +|aws-sdk-medicalimaging|1.80.0| +|aws-sdk-memorydb|1.79.0| +|aws-sdk-mgn|1.76.0| +|aws-sdk-migrationhub|1.77.0| +|aws-sdk-migrationhubconfig|1.76.0| +|aws-sdk-migrationhuborchestrator|1.77.0| +|aws-sdk-migrationhubrefactorspaces|1.76.0| +|aws-sdk-migrationhubstrategy|1.76.0| +|aws-sdk-mpa|1.3.0| +|aws-sdk-mq|1.78.0| +|aws-sdk-mturk|1.76.0| +|aws-sdk-mwaa|1.82.0| +|aws-sdk-neptune|1.78.0| +|aws-sdk-neptunedata|1.76.0| +|aws-sdk-neptunegraph|1.76.0| +|aws-sdk-networkfirewall|1.83.0| +|aws-sdk-networkflowmonitor|1.29.0| +|aws-sdk-networkmanager|1.80.0| +|aws-sdk-networkmonitor|1.67.0| +|aws-sdk-notifications|1.28.0| +|aws-sdk-notificationscontacts|1.27.0| +|aws-sdk-oam|1.79.0| +|aws-sdk-observabilityadmin|1.27.0| +|aws-sdk-odb|1.2.0| +|aws-sdk-omics|1.82.0| +|aws-sdk-opensearch|1.89.0| +|aws-sdk-opensearchserverless|1.80.0| +|aws-sdk-opsworks|1.76.0| +|aws-sdk-opsworkscm|1.76.0| +|aws-sdk-organizations|1.85.0| +|aws-sdk-osis|1.78.0| +|aws-sdk-outposts|1.85.0| +|aws-sdk-panorama|1.76.0| +|aws-sdk-partnercentralselling|1.29.0| +|aws-sdk-paymentcryptography|1.82.0| +|aws-sdk-paymentcryptographydata|1.81.0| +|aws-sdk-pcaconnectorad|1.77.0| +|aws-sdk-pcaconnectorscep|1.46.0| +|aws-sdk-pcs|1.42.0| +|aws-sdk-personalize|1.79.0| +|aws-sdk-personalizeevents|1.76.0| +|aws-sdk-personalizeruntime|1.76.0| +|aws-sdk-pi|1.78.0| +|aws-sdk-pinpoint|1.78.0| +|aws-sdk-pinpointemail|1.76.0| +|aws-sdk-pinpointsmsvoice|1.76.0| +|aws-sdk-pinpointsmsvoicev2|1.82.0| +|aws-sdk-pipes|1.79.0| +|aws-sdk-polly|1.80.0| +|aws-sdk-pricing|1.79.0| +|aws-sdk-privatenetworks|1.76.0| +|aws-sdk-proton|1.76.0| +|aws-sdk-qapps|1.44.0| +|aws-sdk-qbusiness|1.90.0| +|aws-sdk-qconnect|1.85.0| +|aws-sdk-qldb|1.76.0| +|aws-sdk-qldbsession|1.76.0| +|aws-sdk-quicksight|1.95.0| +|aws-sdk-ram|1.76.0| +|aws-sdk-rbin|1.78.0| +|aws-sdk-rds|1.98.0| +|aws-sdk-rdsdata|1.78.0| +|aws-sdk-redshift|1.81.0| +|aws-sdk-redshiftdata|1.79.0| +|aws-sdk-redshiftserverless|1.81.0| +|aws-sdk-rekognition|1.80.0| +|aws-sdk-repostspace|1.75.0| +|aws-sdk-resiliencehub|1.80.0| +|aws-sdk-resourceexplorer2|1.79.0| +|aws-sdk-resourcegroups|1.79.0| +|aws-sdk-resourcegroupstagging|1.76.0| +|aws-sdk-robomaker|1.76.0| +|aws-sdk-rolesanywhere|1.80.0| +|aws-sdk-route53|1.86.0| +|aws-sdk-route53domains|1.79.0| +|aws-sdk-route53profiles|1.55.0| +|aws-sdk-route53recoverycluster|1.76.0| +|aws-sdk-route53recoverycontrolconfig|1.77.0| +|aws-sdk-route53recoveryreadiness|1.76.0| +|aws-sdk-route53resolver|1.82.0| +|aws-sdk-rum|1.78.0| +|aws-sdk-s3|1.98.0| 
+|aws-sdk-s3control|1.88.0| +|aws-sdk-s3outposts|1.76.0| +|aws-sdk-s3tables|1.30.0| +|aws-sdk-s3vectors|1.1.0| +|aws-sdk-sagemaker|1.141.0| +|aws-sdk-sagemakera2iruntime|1.76.0| +|aws-sdk-sagemakeredge|1.76.0| +|aws-sdk-sagemakerfeaturestoreruntime|1.77.0| +|aws-sdk-sagemakergeospatial|1.76.0| +|aws-sdk-sagemakermetrics|1.78.0| +|aws-sdk-sagemakerruntime|1.78.0| +|aws-sdk-savingsplans|1.77.0| +|aws-sdk-scheduler|1.76.0| +|aws-sdk-schemas|1.76.0| +|aws-sdk-secretsmanager|1.80.0| +|aws-sdk-securityhub|1.82.0| +|aws-sdk-securityir|1.28.0| +|aws-sdk-securitylake|1.79.0| +|aws-sdk-serverlessapplicationrepository|1.76.0| +|aws-sdk-servicecatalog|1.78.0| +|aws-sdk-servicecatalogappregistry|1.76.0| +|aws-sdk-servicediscovery|1.77.0| +|aws-sdk-servicequotas|1.77.0| +|aws-sdk-ses|1.79.0| +|aws-sdk-sesv2|1.88.0| +|aws-sdk-sfn|1.80.1| +|aws-sdk-shield|1.76.0| +|aws-sdk-signer|1.76.0| +|aws-sdk-simspaceweaver|1.76.0| +|aws-sdk-sms|1.76.0| +|aws-sdk-snowball|1.76.0| +|aws-sdk-snowdevicemanagement|1.76.0| +|aws-sdk-sns|1.77.0| +|aws-sdk-socialmessaging|1.31.0| +|aws-sdk-sqs|1.76.0| +|aws-sdk-ssm|1.85.1| +|aws-sdk-ssmcontacts|1.76.0| +|aws-sdk-ssmguiconnect|1.11.0| +|aws-sdk-ssmincidents|1.76.0| +|aws-sdk-ssmquicksetup|1.41.0| +|aws-sdk-ssmsap|1.79.0| +|aws-sdk-sso|1.76.0| +|aws-sdk-ssoadmin|1.76.0| +|aws-sdk-ssooidc|1.77.0| +|aws-sdk-storagegateway|1.83.0| +|aws-sdk-sts|1.78.0| +|aws-sdk-supplychain|1.71.0| +|aws-sdk-support|1.77.0| +|aws-sdk-supportapp|1.76.0| +|aws-sdk-swf|1.78.0| +|aws-sdk-synthetics|1.84.0| +|aws-sdk-taxsettings|1.54.0| +|aws-sdk-textract|1.77.0| +|aws-sdk-timestreaminfluxdb|1.63.0| +|aws-sdk-timestreamquery|1.80.0| +|aws-sdk-timestreamwrite|1.77.0| +|aws-sdk-tnb|1.77.0| +|aws-sdk-transcribe|1.81.0| +|aws-sdk-transcribestreaming|1.78.0| +|aws-sdk-transfer|1.88.0| +|aws-sdk-translate|1.76.0| +|aws-sdk-trustedadvisor|1.77.0| +|aws-sdk-verifiedpermissions|1.85.0| +|aws-sdk-voiceid|1.76.0| +|aws-sdk-vpclattice|1.78.0| +|aws-sdk-waf|1.77.0| +|aws-sdk-wafregional|1.77.0| +|aws-sdk-wafv2|1.88.0| +|aws-sdk-wellarchitected|1.77.0| +|aws-sdk-wisdom|1.77.0| +|aws-sdk-workdocs|1.76.0| +|aws-sdk-workmail|1.77.0| +|aws-sdk-workmailmessageflow|1.76.0| +|aws-sdk-workspaces|1.91.0| +|aws-sdk-workspacesinstances|1.3.0| +|aws-sdk-workspacesthinclient|1.81.0| +|aws-sdk-workspacesweb|1.81.0| +|aws-sdk-xray|1.77.0| +|aws-sig-auth|0.60.3| +|aws-sigv4|1.3.3| +|aws-smithy-async|1.2.5| +|aws-smithy-cbor|0.61.1| +|aws-smithy-checksums|0.63.5| +|aws-smithy-client|0.60.3| +|aws-smithy-compression|0.0.4| +|aws-smithy-eventstream|0.60.9| +|aws-smithy-eventstream-fuzz|0.1.0| +|aws-smithy-experimental|0.2.0| +|aws-smithy-http|0.62.1| +|aws-smithy-http-auth|0.60.3| +|aws-smithy-http-client|1.0.6| +|aws-smithy-http-fuzz|0.0.0| +|aws-smithy-http-tower|0.60.3| +|aws-smithy-json|0.61.4| +|aws-smithy-json-fuzz|0.0.0| +|aws-smithy-mocks|0.1.1| +|aws-smithy-mocks-experimental|0.2.4| +|aws-smithy-observability|0.1.3| +|aws-smithy-observability-otel|0.1.1| +|aws-smithy-protocol-test|0.63.4| +|aws-smithy-query|0.60.7| +|aws-smithy-runtime|1.8.4| +|aws-smithy-runtime-api|1.8.3| +|aws-smithy-types|1.3.2| +|aws-smithy-types-convert|0.60.9| +|aws-smithy-types-fuzz|0.0.0| +|aws-smithy-wasm|0.1.4| +|aws-smithy-xml|0.60.10| +|aws-types|1.3.7| +|aws-types-fuzz|0.0.0| +
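The new GetLogObject operation referenced above is modeled as a streaming API, so the generated Rust client returns an event stream rather than a single payload. The snippet below is a minimal sketch of how a caller might drive it with `aws-sdk-cloudwatchlogs` 1.94.0; it assumes `aws-config` and `tokio` as dependencies, the fluent-builder and type names (`get_log_object`, `log_object_pointer`, `unmask`, `field_stream`, `GetLogObjectResponseStream::Fields`) are inferred from the Smithy model shapes added in this change rather than taken from published documentation, and the pointer value is a placeholder.

```rust
// Sketch only: method and type names are inferred from the Smithy model in this change.
use aws_sdk_cloudwatchlogs::types::GetLogObjectResponseStream;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a CloudWatch Logs client from the default credential/region chain.
    let config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
    let client = aws_sdk_cloudwatchlogs::Client::new(&config);

    // `logObjectPointer` is required; the value below is a placeholder that would
    // normally come from a prior query (for example an `@ptr.$['...']` reference field).
    let output = client
        .get_log_object()
        .log_object_pointer("example-log-object-pointer")
        .unmask(false)
        .send()
        .await?;

    // The response carries an event stream; each `fields` event holds a chunk of the
    // log object as a blob.
    let mut stream = output.field_stream;
    let mut bytes: Vec<u8> = Vec::new();
    while let Some(event) = stream.recv().await? {
        match event {
            GetLogObjectResponseStream::Fields(fields) => {
                if let Some(data) = fields.data {
                    bytes.extend_from_slice(data.as_ref());
                }
            }
            // The union is non-exhaustive, so handle anything else generically.
            other => eprintln!("unhandled event: {other:?}"),
        }
    }
    println!("retrieved {} bytes of log object data", bytes.len());
    Ok(())
}
```

Note that the model attaches an endpoint host prefix of `streaming-` to this operation, so requests are expected to go to a dedicated streaming endpoint; the generated client should apply that prefix automatically.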
+ + July 17th, 2025 =============== **Service Features:** diff --git a/Cargo.toml b/Cargo.toml index 4530f2fe5a81..831cfd41ef4e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,13 +2,13 @@ resolver = "2" exclude = [ "examples/test-utils", - "examples/lambda", "examples/cross_service", - "examples/examples", "examples/webassembly", - "tests/telemetry", + "examples/examples", + "examples/lambda", "tests/webassembly", - "tests/no-default-features" + "tests/no-default-features", + "tests/telemetry" ] members = [ "sdk/accessanalyzer", diff --git a/aws-models/auditmanager.json b/aws-models/auditmanager.json index 3b23210a8e28..8f0d6dfe320e 100644 --- a/aws-models/auditmanager.json +++ b/aws-models/auditmanager.json @@ -7895,6 +7895,9 @@ { "target": "com.amazonaws.auditmanager#ResourceNotFoundException" }, + { + "target": "com.amazonaws.auditmanager#ThrottlingException" + }, { "target": "com.amazonaws.auditmanager#ValidationException" } diff --git a/aws-models/cloudwatch-logs.json b/aws-models/cloudwatch-logs.json index a26c0c5456f6..7cd7bc717500 100644 --- a/aws-models/cloudwatch-logs.json +++ b/aws-models/cloudwatch-logs.json @@ -1266,6 +1266,9 @@ "target": "com.amazonaws.cloudwatchlogs#Arn" } }, + "com.amazonaws.cloudwatchlogs#Data": { + "type": "blob" + }, "com.amazonaws.cloudwatchlogs#DataAlreadyAcceptedException": { "type": "structure", "members": { @@ -3255,7 +3258,7 @@ "logGroupNamePattern": { "target": "com.amazonaws.cloudwatchlogs#LogGroupNamePattern", "traits": { - "smithy.api#documentation": "

If you specify a string for this parameter, the operation returns only log groups that\n have names that match the string based on a case-sensitive substring search. For example, if\n you specify DataLogs, log groups named DataLogs, aws/DataLogs, and\n GroupDataLogs would match, but datalogs, Data/log/s and\n Groupdata would not match.

\n

If you specify logGroupNamePattern in your request, then only\n arn, creationTime, and logGroupName are included in\n the response.

\n \n

\n logGroupNamePattern and logGroupNamePrefix are mutually exclusive.\n Only one of these parameters can be passed.

\n
" + "smithy.api#documentation": "

If you specify a string for this parameter, the operation returns only log groups that\n have names that match the string based on a case-sensitive substring search. For example, if\n you specify DataLogs, log groups named DataLogs,\n aws/DataLogs, and GroupDataLogs would match, but\n datalogs, Data/log/s and Groupdata would not\n match.

\n

If you specify logGroupNamePattern in your request, then only\n arn, creationTime, and logGroupName are included in\n the response.

\n \n

\n logGroupNamePattern and logGroupNamePrefix are mutually exclusive.\n Only one of these parameters can be passed.

\n
" } }, "nextToken": { @@ -4529,6 +4532,20 @@ "target": "com.amazonaws.cloudwatchlogs#FieldIndex" } }, + "com.amazonaws.cloudwatchlogs#FieldsData": { + "type": "structure", + "members": { + "data": { + "target": "com.amazonaws.cloudwatchlogs#Data", + "traits": { + "smithy.api#documentation": "

The actual log data content returned in the streaming response. This contains the fields and values of the log event in a structured format that can be parsed and processed by the client.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing the extracted fields from a log event. These fields are extracted based on the log format and can be used for structured querying and analysis.

" + } + }, "com.amazonaws.cloudwatchlogs#FilterCount": { "type": "integer" }, @@ -5435,6 +5452,88 @@ "smithy.api#output": {} } }, + "com.amazonaws.cloudwatchlogs#GetLogObject": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudwatchlogs#GetLogObjectRequest" + }, + "output": { + "target": "com.amazonaws.cloudwatchlogs#GetLogObjectResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudwatchlogs#AccessDeniedException" + }, + { + "target": "com.amazonaws.cloudwatchlogs#InvalidOperationException" + }, + { + "target": "com.amazonaws.cloudwatchlogs#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudwatchlogs#LimitExceededException" + }, + { + "target": "com.amazonaws.cloudwatchlogs#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves a large logging object (LLO) and streams it back. This API is used to fetch the content of large portions of log events that have been ingested through the PutOpenTelemetryLogs API. \n When log events contain fields that would cause the total event size to exceed 1MB, CloudWatch Logs automatically processes up to 10 fields, starting with the largest fields. Each field is truncated as needed to keep \n the total event size as close to 1MB as possible. The excess portions are stored as Large Log Objects (LLOs) and these fields are processed separately and LLO reference system fields (in the format @ptr.$[path.to.field]) are \n added. The path in the reference field reflects the original JSON structure where the large field was located. For example, this could be @ptr.$['input']['message'], @ptr.$['AAA']['BBB']['CCC']['DDD'], @ptr.$['AAA'], or any other path matching your log structure.

", + "smithy.api#endpoint": { + "hostPrefix": "streaming-" + } + } + }, + "com.amazonaws.cloudwatchlogs#GetLogObjectRequest": { + "type": "structure", + "members": { + "unmask": { + "target": "com.amazonaws.cloudwatchlogs#Unmask", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

" + } + }, + "logObjectPointer": { + "target": "com.amazonaws.cloudwatchlogs#LogObjectPointer", + "traits": { + "smithy.api#documentation": "

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The parameters for the GetLogObject operation.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.cloudwatchlogs#GetLogObjectResponse": { + "type": "structure", + "members": { + "fieldStream": { + "target": "com.amazonaws.cloudwatchlogs#GetLogObjectResponseStream" + } + }, + "traits": { + "smithy.api#documentation": "

The response from the GetLogObject operation.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.cloudwatchlogs#GetLogObjectResponseStream": { + "type": "union", + "members": { + "fields": { + "target": "com.amazonaws.cloudwatchlogs#FieldsData" + }, + "InternalStreamingException": { + "target": "com.amazonaws.cloudwatchlogs#InternalStreamingException" + } + }, + "traits": { + "smithy.api#documentation": "

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.

", + "smithy.api#streaming": {} + } + }, "com.amazonaws.cloudwatchlogs#GetLogRecord": { "type": "operation", "input": { @@ -5937,6 +6036,18 @@ "com.amazonaws.cloudwatchlogs#Interleaved": { "type": "boolean" }, + "com.amazonaws.cloudwatchlogs#InternalStreamingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.cloudwatchlogs#Message" + } + }, + "traits": { + "smithy.api#documentation": "

An internal error occurred during the streaming of log data. This exception is thrown when there's an issue with the internal streaming mechanism used by the GetLogObject operation.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.cloudwatchlogs#InvalidOperationException": { "type": "structure", "members": { @@ -6965,6 +7076,15 @@ "target": "com.amazonaws.cloudwatchlogs#LogGroup" } }, + "com.amazonaws.cloudwatchlogs#LogObjectPointer": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, "com.amazonaws.cloudwatchlogs#LogRecord": { "type": "map", "key": { @@ -7243,6 +7363,9 @@ { "target": "com.amazonaws.cloudwatchlogs#GetLogGroupFields" }, + { + "target": "com.amazonaws.cloudwatchlogs#GetLogObject" + }, { "target": "com.amazonaws.cloudwatchlogs#GetLogRecord" }, @@ -9334,13 +9457,13 @@ "source": { "target": "com.amazonaws.cloudwatchlogs#Source", "traits": { - "smithy.api#documentation": "

The path to the field in the log event that you want to parse. If you omit this value, the whole log message is parsed.

" + "smithy.api#documentation": "

The path to the field in the log event that you want to parse. If you omit this value, the\n whole log message is parsed.

" } }, "eventSource": { "target": "com.amazonaws.cloudwatchlogs#EventSource", "traits": { - "smithy.api#documentation": "

Specify the service or process that produces the log events that will be converted with this processor.

", + "smithy.api#documentation": "

Specify the service or process that produces the log events that will be converted with\n this processor.

", "smithy.api#required": {} } }, @@ -9353,7 +9476,7 @@ } }, "traits": { - "smithy.api#documentation": "

This processor converts logs into Open Cybersecurity Schema Framework (OCSF) events.

\n

For more information about this processor including examples, see \n parseToOSCF in the CloudWatch Logs User Guide.

" + "smithy.api#documentation": "

This processor converts logs into Open Cybersecurity Schema\n Framework (OCSF) events.

\n

For more information about this processor, including examples, see parseToOCSF in the CloudWatch Logs User Guide.

" } }, "com.amazonaws.cloudwatchlogs#ParseVPC": { @@ -9542,6 +9665,12 @@ "traits": { "smithy.api#enumValue": "TRANSFORMER_POLICY" } + }, + "METRIC_EXTRACTION_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "METRIC_EXTRACTION_POLICY" + } } } }, @@ -9734,7 +9863,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an account-level data protection policy, subscription filter policy, or field\n index policy that applies to all log groups or a subset of log groups in the account.

\n

To use this operation, you must be signed on with the correct permissions depending on the\n type of policy that you are creating.

\n \n

\n Data protection policy\n

\n

A data protection policy can help safeguard sensitive data that's ingested by your log\n groups by auditing and masking the sensitive log data. Each account can have only one\n account-level data protection policy.

\n \n

Sensitive data is detected and masked when it is ingested into a log group. When you set\n a data protection policy, log events ingested into the log groups before that time are not\n masked.

\n
\n

If you use PutAccountPolicy to create a data protection policy for your whole\n account, it applies to both existing log groups and all log groups that are created later in\n this account. The account-level policy is applied to existing log groups with eventual\n consistency. It might take up to 5 minutes before sensitive data in existing log groups begins\n to be masked.

\n

By default, when a user views a log event that includes masked data, the sensitive data is\n replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to\n true to view the unmasked log events. Users with the logs:Unmask\n can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs\n Insights query with the unmask query command.

\n

For more information, including a list of types of data that can be audited and masked,\n see Protect sensitive log data\n with masking.

\n

To use the PutAccountPolicy operation for a data protection policy, you must\n be signed on with the logs:PutDataProtectionPolicy and\n logs:PutAccountPolicy permissions.

\n

The PutAccountPolicy operation applies to all log groups in the account. You\n can use PutDataProtectionPolicy to create a data protection policy that applies to just one\n log group. If a log group has its own data protection policy and the account also has an\n account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.

\n

\n Subscription filter policy\n

\n

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to\n both existing log groups and log groups that are created later in this account. Supported\n destinations are Kinesis Data Streams, Firehose, and Lambda. When log\n events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP\n format.

\n

The following destinations are supported for subscription filters:

\n \n

Each account can have one account-level subscription filter policy per Region. If you are\n updating an existing filter, you must specify the correct name in PolicyName. To\n perform a PutAccountPolicy subscription filter operation for any destination\n except a Lambda function, you must also have the iam:PassRole\n permission.

\n

\n Transformer policy\n

\n

Creates or updates a log transformer policy for your account. You use\n log transformers to transform log events into a different format, making them easier for you\n to process and analyze. You can also transform logs from different sources into standardized\n formats that contain relevant, source-specific information. After you have created a\n transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You\n can then refer to the transformed versions of the logs during operations such as querying with\n CloudWatch Logs Insights or creating metric filters or subscription filters.

\n

You can also use a transformer to copy metadata from metadata keys into the log events\n themselves. This metadata can include log group name, log stream name, account ID and\n Region.

\n

A transformer for a log group is a series of processors, where each processor applies one\n type of transformation to the log events ingested into this log group. For more information\n about the available processors to use in a transformer, see Processors that you can use.

\n

Having log events in standardized format enables visibility across your applications for\n your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation\n for common log types with out-of-the-box transformation templates for major Amazon Web Services\n log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use\n pre-built transformation templates or create custom transformation policies.

\n

You can create transformers only for the log groups in the Standard log class.

\n

You can have one account-level transformer policy that applies to all log groups in the\n account. Or you can create as many as 20 account-level transformer policies that are each\n scoped to a subset of log groups with the selectionCriteria parameter. If you\n have multiple account-level transformer policies with selection criteria, no two of them can\n use the same or overlapping log group name prefixes. For example, if you have one policy\n filtered to log groups that start with my-log, you can't have another field index\n policy filtered to my-logpprod or my-logging.

\n

You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with\n PutTransformer and an account-level transformer that could apply to the same\n log group, the log group uses only the log-group level transformer. It ignores the\n account-level transformer.

\n

\n Field index policy\n

\n

You can use field index policies to create indexes on fields found in log events in the\n log group. Creating field indexes can help lower the scan volume for CloudWatch Logs\n Insights queries that reference those fields, because these queries attempt to skip the\n processing of log events that are known to not match the indexed field. Good fields to index\n are fields that you often need to query for and fields or values that match only a small\n fraction of the total log events. Common examples of indexes include request ID, session ID,\n user IDs, or instance IDs. For more information, see Create field indexes\n to improve query performance and reduce costs\n

\n

To find the fields that are in your log group events, use the GetLogGroupFields operation.

\n

For example, suppose you have created a field index for requestId. Then, any\n CloudWatch Logs Insights query on that log group that includes requestId =\n value\n or requestId in [value,\n value, ...] will attempt to process only the log events where\n the indexed field matches the specified value.

\n

Matches of log events to the names of indexed fields are case-sensitive. For example, an\n indexed field of RequestId won't match a log event containing\n requestId.

\n

You can have one account-level field index policy that applies to all log groups in the\n account. Or you can create as many as 20 account-level field index policies that are each\n scoped to a subset of log groups with the selectionCriteria parameter. If you\n have multiple account-level index policies with selection criteria, no two of them can use the\n same or overlapping log group name prefixes. For example, if you have one policy filtered to\n log groups that start with my-log, you can't have another field index policy\n filtered to my-logpprod or my-logging.

\n

If you create an account-level field index policy in a monitoring account in cross-account\n observability, the policy is applied only to the monitoring account and not to any source\n accounts.

\n

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log\n group will use only that log-group level policy, and will ignore the account-level policy that\n you create with PutAccountPolicy.

" + "smithy.api#documentation": "

Creates an account-level data protection policy, subscription filter policy, field index\n policy, transformer policy, or metric extraction policy that applies to all log groups or a\n subset of log groups in the account.

\n

To use this operation, you must be signed on with the correct permissions depending on the\n type of policy that you are creating.

\n \n

\n Data protection policy\n

\n

A data protection policy can help safeguard sensitive data that's ingested by your log\n groups by auditing and masking the sensitive log data. Each account can have only one\n account-level data protection policy.

\n \n

Sensitive data is detected and masked when it is ingested into a log group. When you set\n a data protection policy, log events ingested into the log groups before that time are not\n masked.

\n
\n

If you use PutAccountPolicy to create a data protection policy for your whole\n account, it applies to both existing log groups and all log groups that are created later in\n this account. The account-level policy is applied to existing log groups with eventual\n consistency. It might take up to 5 minutes before sensitive data in existing log groups begins\n to be masked.

\n

By default, when a user views a log event that includes masked data, the sensitive data is\n replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to\n true to view the unmasked log events. Users with the logs:Unmask permission\n can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs\n Insights query with the unmask query command.

\n

For more information, including a list of types of data that can be audited and masked,\n see Protect sensitive log data\n with masking.

\n

To use the PutAccountPolicy operation for a data protection policy, you must\n be signed on with the logs:PutDataProtectionPolicy and\n logs:PutAccountPolicy permissions.

\n

The PutAccountPolicy operation applies to all log groups in the account. You\n can use PutDataProtectionPolicy to create a data protection policy that applies to just one\n log group. If a log group has its own data protection policy and the account also has an\n account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.

\n

\n Subscription filter policy\n

\n

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to\n both existing log groups and log groups that are created later in this account. Supported\n destinations are Kinesis Data Streams, Firehose, and Lambda. When log\n events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP\n format.

\n

The following destinations are supported for subscription filters:

\n \n

Each account can have one account-level subscription filter policy per Region. If you are\n updating an existing filter, you must specify the correct name in PolicyName. To\n perform a PutAccountPolicy subscription filter operation for any destination\n except a Lambda function, you must also have the iam:PassRole\n permission.

\n

\n Transformer policy\n

\n

Creates or updates a log transformer policy for your account. You use\n log transformers to transform log events into a different format, making them easier for you\n to process and analyze. You can also transform logs from different sources into standardized\n formats that contain relevant, source-specific information. After you have created a\n transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You\n can then refer to the transformed versions of the logs during operations such as querying with\n CloudWatch Logs Insights or creating metric filters or subscription filters.

\n

You can also use a transformer to copy metadata from metadata keys into the log events\n themselves. This metadata can include log group name, log stream name, account ID and\n Region.

\n

A transformer for a log group is a series of processors, where each processor applies one\n type of transformation to the log events ingested into this log group. For more information\n about the available processors to use in a transformer, see Processors that you can use.

\n

Having log events in standardized format enables visibility across your applications for\n your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation\n for common log types with out-of-the-box transformation templates for major Amazon Web Services\n log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use\n pre-built transformation templates or create custom transformation policies.

\n

You can create transformers only for the log groups in the Standard log class.

\n

You can have one account-level transformer policy that applies to all log groups in the\n account. Or you can create as many as 20 account-level transformer policies that are each\n scoped to a subset of log groups with the selectionCriteria parameter. If you\n have multiple account-level transformer policies with selection criteria, no two of them can\n use the same or overlapping log group name prefixes. For example, if you have one policy\n filtered to log groups that start with my-log, you can't have another transformer\n policy filtered to my-logpprod or my-logging.

\n

You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with\n PutTransformer and an account-level transformer that could apply to the same\n log group, the log group uses only the log-group level transformer. It ignores the\n account-level transformer.

\n

\n Field index policy\n

\n

You can use field index policies to create indexes on fields found in log events in the\n log group. Creating field indexes can help lower the scan volume for CloudWatch Logs\n Insights queries that reference those fields, because these queries attempt to skip the\n processing of log events that are known to not match the indexed field. Good fields to index\n are fields that you often need to query for and fields or values that match only a small\n fraction of the total log events. Common examples of indexes include request ID, session ID,\n user IDs, or instance IDs. For more information, see Create field indexes\n to improve query performance and reduce costs\n

\n

To find the fields that are in your log group events, use the GetLogGroupFields operation.

\n

For example, suppose you have created a field index for requestId. Then, any\n CloudWatch Logs Insights query on that log group that includes requestId =\n value\n or requestId in [value,\n value, ...] will attempt to process only the log events where\n the indexed field matches the specified value.

\n

Matches of log events to the names of indexed fields are case-sensitive. For example, an\n indexed field of RequestId won't match a log event containing\n requestId.

\n

You can have one account-level field index policy that applies to all log groups in the\n account. Or you can create as many as 20 account-level field index policies that are each\n scoped to a subset of log groups with the selectionCriteria parameter. If you\n have multiple account-level index policies with selection criteria, no two of them can use the\n same or overlapping log group name prefixes. For example, if you have one policy filtered to\n log groups that start with my-log, you can't have another field index policy\n filtered to my-logpprod or my-logging.

\n

If you create an account-level field index policy in a monitoring account in cross-account\n observability, the policy is applied only to the monitoring account and not to any source\n accounts.

\n

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log\n group will use only that log-group level policy, and will ignore the account-level policy that\n you create with PutAccountPolicy.

\n

\n Metric extraction policy\n

\n

A metric extraction policy controls whether CloudWatch Metrics can be created through the\n Embedded Metrics Format (EMF) for log groups in your account. By default, EMF metric creation\n is enabled for all log groups. You can use metric extraction policies to disable EMF metric\n creation for your entire account or specific log groups.

\n

When a policy disables EMF metric creation for a log group, log events in the EMF format\n are still ingested, but no CloudWatch Metrics are created from them.

\n \n

Creating a policy disables metrics for AWS features that use EMF to create metrics, such\n as CloudWatch Container Insights and CloudWatch Application Signals. To prevent turning off\n those features by accident, we recommend that you exclude the underlying log groups through\n selection criteria such as LogGroupNamePrefix NOT IN [\"/aws/containerinsights\",\n \"/aws/ecs/containerinsights\", \"/aws/application-signals/data\"].

\n
\n

Each account can have either one account-level metric extraction policy that applies to\n all log groups, or up to 5 policies that are each scoped to a subset of log groups with the\n selectionCriteria parameter. The selection criteria supports filtering by LogGroupName and\n LogGroupNamePrefix using the operators IN and NOT IN. You can specify up to 50 values in each\n IN or NOT IN list.

\n

The selection criteria can be specified in these formats:

\n

\n LogGroupName IN [\"log-group-1\", \"log-group-2\"]\n

\n

\n LogGroupNamePrefix NOT IN [\"/aws/prefix1\", \"/aws/prefix2\"]\n

\n

If you have multiple account-level metric extraction policies with selection criteria, no\n two of them can have overlapping criteria. For example, if you have one policy with selection\n criteria LogGroupNamePrefix IN [\"my-log\"], you can't have another metric extraction policy\n with selection criteria LogGroupNamePrefix IN [\"/my-log-prod\"] or LogGroupNamePrefix IN\n [\"/my-logging\"], as the set of log groups matching these prefixes would be a subset of the log\n groups matching the first policy's prefix, creating an overlap.

\n

When using NOT IN, only one policy with this operator is allowed per account.

\n

When combining policies with IN and NOT IN operators, the overlap check ensures that\n policies don't have conflicting effects. Two policies with IN and NOT IN operators do not\n overlap if and only if every value in the IN policy is completely contained within some value\n in the NOT IN policy. For example:

\n " } }, "com.amazonaws.cloudwatchlogs#PutAccountPolicyRequest": { @@ -9770,7 +9899,7 @@ "selectionCriteria": { "target": "com.amazonaws.cloudwatchlogs#SelectionCriteria", "traits": { - "smithy.api#documentation": "

Use this parameter to apply the new policy to a subset of log groups in the\n account.

\n

Specifing selectionCriteria is valid only when you specify\n SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or\n TRANSFORMER_POLICYfor policyType.

\n

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported\n selectionCriteria filter is LogGroupName NOT IN []\n

\n

If policyType is FIELD_INDEX_POLICY or\n TRANSFORMER_POLICY, the only supported selectionCriteria filter is\n LogGroupNamePrefix\n

\n

The selectionCriteria string can be up to 25KB in length. The length is\n determined by using its UTF-8 bytes.

\n

Using the selectionCriteria parameter with\n SUBSCRIPTION_FILTER_POLICY is useful to help prevent infinite loops. For more\n information, see Log recursion\n prevention.

" + "smithy.api#documentation": "

Use this parameter to apply the new policy to a subset of log groups in the\n account.

\n

Specifying selectionCriteria is valid only when you specify\n SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or\n TRANSFORMER_POLICY for policyType.

\n

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported\n selectionCriteria filter is LogGroupName NOT IN []\n

\n

If policyType is FIELD_INDEX_POLICY or\n TRANSFORMER_POLICY, the only supported selectionCriteria filter is\n LogGroupNamePrefix\n

\n

The selectionCriteria string can be up to 25KB in length. The length is\n determined by using its UTF-8 bytes.

\n

Using the selectionCriteria parameter with\n SUBSCRIPTION_FILTER_POLICY is useful to help prevent infinite loops. For more\n information, see Log recursion\n prevention.

" } } }, @@ -10068,7 +10197,7 @@ "logType": { "target": "com.amazonaws.cloudwatchlogs#LogType", "traits": { - "smithy.api#documentation": "

Defines the type of log that the source is sending.

\n ", + "smithy.api#documentation": "

Defines the type of log that the source is sending.

\n ", "smithy.api#required": {} } }, @@ -12981,7 +13110,7 @@ "smithy.api#deprecated": { "message": "Please use the generic tagging API UntagResource" }, - "smithy.api#documentation": "\n

The UntagLogGroup operation is on the path to deprecation. We recommend that you use\n UntagResource instead.

\n
\n

Removes the specified tags from the specified log group.

\n

To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource.

\n

CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified\n tags to log groups using the aws:Resource/key-name\n or\n aws:TagKeys condition keys.

" + "smithy.api#documentation": "\n

The UntagLogGroup operation is on the path to deprecation. We recommend that you use\n UntagResource instead.

\n
\n

Removes the specified tags from the specified log group.

\n

To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource.

\n

When using IAM policies to control tag management for CloudWatch Logs log groups, the\n condition keys aws:Resource/key-name and aws:TagKeys cannot be used to restrict which tags\n users can assign.

" } }, "com.amazonaws.cloudwatchlogs#UntagLogGroupRequest": { diff --git a/aws-models/mediaconvert.json b/aws-models/mediaconvert.json index e9e6d7b6ec33..75b715fd784a 100644 --- a/aws-models/mediaconvert.json +++ b/aws-models/mediaconvert.json @@ -13892,7 +13892,7 @@ "FileInput": { "target": "com.amazonaws.mediaconvert#__stringMax2048PatternS3Https", "traits": { - "smithy.api#documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL.", + "smithy.api#documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. For standard inputs, provide the path to your S3, HTTP, or HTTPS source file. For example, s3://amzn-s3-demo-bucket/input.mp4 for an Amazon S3 input or https://example.com/input.mp4 for an HTTPS input. For TAMS inputs, specify the HTTPS endpoint of your TAMS server. For example, https://tams-server.example.com . When you do, also specify Source ID, Timerange, GAP handling, and the Authorization connection ARN under TAMS settings. (Don't include these parameters in the Input file URL.) For IMF inputs, specify your input by providing the path to your CPL. For example, s3://amzn-s3-demo-bucket/vf/cpl.xml . If the CPL is in an incomplete IMP, make sure to use Supplemental IMPsto specify any supplemental IMPs that contain assets referenced by the CPL.", "smithy.api#jsonName": "fileInput" } }, @@ -13959,6 +13959,13 @@ "smithy.api#jsonName": "supplementalImps" } }, + "TamsSettings": { + "target": "com.amazonaws.mediaconvert#InputTamsSettings", + "traits": { + "smithy.api#documentation": "Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it", + "smithy.api#jsonName": "tamsSettings" + } + }, "TimecodeSource": { "target": "com.amazonaws.mediaconvert#InputTimecodeSource", "traits": { @@ -14247,6 +14254,42 @@ "smithy.api#documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. 
Doing so creates horizontal interlacing artifacts." } }, + "com.amazonaws.mediaconvert#InputTamsSettings": { + "type": "structure", + "members": { + "AuthConnectionArn": { + "target": "com.amazonaws.mediaconvert#__stringPatternArnAwsAZ09EventsAZ090912ConnectionAZAZ09AF0936", + "traits": { + "smithy.api#documentation": "Specify the ARN (Amazon Resource Name) of an EventBridge Connection to authenticate with your TAMS server. The EventBridge Connection stores your authentication credentials securely. MediaConvert assumes your job's IAM role to access this connection, so ensure the role has the events:RetrieveConnectionCredentials, secretsmanager:DescribeSecret, and secretsmanager:GetSecretValue permissions. Format: arn:aws:events:region:account-id:connection/connection-name/unique-id", + "smithy.api#jsonName": "authConnectionArn" + } + }, + "GapHandling": { + "target": "com.amazonaws.mediaconvert#TamsGapHandling", + "traits": { + "smithy.api#documentation": "Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together. This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins. This maintains visual continuity during gaps.", + "smithy.api#jsonName": "gapHandling" + } + }, + "SourceId": { + "target": "com.amazonaws.mediaconvert#__string", + "traits": { + "smithy.api#documentation": "Specify the unique identifier for the media source in your TAMS server. MediaConvert uses this source ID to locate the appropriate flows containing the media segments you want to process. The source ID corresponds to a specific media source registered in your TAMS server. This source must be of type urn:x-nmos:format:multi, and can can reference multiple flows for audio, video, or combined audio/video content. MediaConvert automatically selects the highest quality flows available for your job. This setting is required when include TAMS settings in your job.", + "smithy.api#jsonName": "sourceId" + } + }, + "Timerange": { + "target": "com.amazonaws.mediaconvert#__stringPattern019090190908019090190908", + "traits": { + "smithy.api#documentation": "Specify the time range of media segments to retrieve from your TAMS server. MediaConvert fetches only the segments that fall within this range. Use the format specified by your TAMS server implementation. This must be two timestamp values with the format {sign?}{seconds}:{nanoseconds}, separated by an underscore, surrounded by either parentheses or square brackets. Example: [15:0_35:0) This setting is required when include TAMS settings in your job.", + "smithy.api#jsonName": "timerange" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. 
This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it" + } + }, "com.amazonaws.mediaconvert#InputTemplate": { "type": "structure", "members": { @@ -24024,6 +24067,32 @@ "smithy.api#output": {} } }, + "com.amazonaws.mediaconvert#TamsGapHandling": { + "type": "enum", + "members": { + "SKIP_GAPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIP_GAPS" + } + }, + "FILL_WITH_BLACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FILL_WITH_BLACK" + } + }, + "HOLD_LAST_FRAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HOLD_LAST_FRAME" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together. This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins. This maintains visual continuity during gaps." + } + }, "com.amazonaws.mediaconvert#TeletextDestinationSettings": { "type": "structure", "members": { @@ -25940,7 +26009,7 @@ "Height": { "target": "com.amazonaws.mediaconvert#__integerMin0Max2147483647", "traits": { - "smithy.api#documentation": "Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high.", + "smithy.api#documentation": "Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high.", "smithy.api#jsonName": "height" } }, @@ -25954,7 +26023,7 @@ "Width": { "target": "com.amazonaws.mediaconvert#__integerMin0Max2147483647", "traits": { - "smithy.api#documentation": "Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels wide. 
When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide.", + "smithy.api#documentation": "Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide.", "smithy.api#jsonName": "width" } }, @@ -29511,6 +29580,12 @@ "smithy.api#pattern": "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}(@[0-9]+(\\.[0-9]+)?(:[0-9]+)?)?$" } }, + "com.amazonaws.mediaconvert#__stringPattern019090190908019090190908": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(\\[|\\()?(-?(0|[1-9][0-9]*):(0|[1-9][0-9]{0,8}))?(_(-?(0|[1-9][0-9]*):(0|[1-9][0-9]{0,8}))?)?(\\]|\\))?$" + } + }, "com.amazonaws.mediaconvert#__stringPattern01D20305D205D": { "type": "string", "traits": { @@ -29559,6 +29634,12 @@ "smithy.api#pattern": "^[A-Za-z]{2,3}(-[A-Za-z0-9-]+)?$" } }, + "com.amazonaws.mediaconvert#__stringPatternArnAwsAZ09EventsAZ090912ConnectionAZAZ09AF0936": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws[a-z0-9-]*:events:[a-z0-9-]+:[0-9]{12}:connection/[a-zA-Z0-9-]+/[a-f0-9-]{36}$" + } + }, "com.amazonaws.mediaconvert#__stringPatternArnAwsUsGovAcm": { "type": "string", "traits": { diff --git a/aws-models/outposts.json b/aws-models/outposts.json index fdb5ce7507c8..00238a21373c 100644 --- a/aws-models/outposts.json +++ b/aws-models/outposts.json @@ -276,7 +276,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

" + "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or\n an Outposts server configuration.

" } }, "RackId": { @@ -326,7 +326,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

" + "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an\n Outposts server configuration.

" } }, "AccountId": { @@ -803,7 +803,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

" + "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an\n Outposts server configuration.

" } }, "CapacityTaskStatus": { @@ -1691,7 +1691,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "

The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

" + "smithy.api#documentation": "

The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts\n rack or an Outposts server configuration.

" } }, "RequestedInstancePools": { @@ -1716,7 +1716,7 @@ "CapacityTaskStatus": { "target": "com.amazonaws.outposts#CapacityTaskStatus", "traits": { - "smithy.api#documentation": "

Status of the capacity task.

\n

A capacity task can have one of the following statuses:

\n " + "smithy.api#documentation": "

Status of the capacity task.

\n

A capacity task can have one of the following statuses:

\n " } }, "Failed": { @@ -1763,6 +1763,9 @@ "target": "com.amazonaws.outposts#GetCatalogItemOutput" }, "errors": [ + { + "target": "com.amazonaws.outposts#AccessDeniedException" + }, { "target": "com.amazonaws.outposts#InternalServerException" }, @@ -1965,13 +1968,98 @@ } } }, + "com.amazonaws.outposts#GetOutpostBillingInformation": { + "type": "operation", + "input": { + "target": "com.amazonaws.outposts#GetOutpostBillingInformationInput" + }, + "output": { + "target": "com.amazonaws.outposts#GetOutpostBillingInformationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.outposts#AccessDeniedException" + }, + { + "target": "com.amazonaws.outposts#InternalServerException" + }, + { + "target": "com.amazonaws.outposts#NotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets current and historical billing information about the specified Outpost.

", + "smithy.api#http": { + "method": "GET", + "uri": "/outpost/{OutpostIdentifier}/billing-information", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Subscriptions", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.outposts#GetOutpostBillingInformationInput": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.outposts#Token", + "traits": { + "smithy.api#httpQuery": "NextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.outposts#MaxResults1000", + "traits": { + "smithy.api#httpQuery": "MaxResults" + } + }, + "OutpostIdentifier": { + "target": "com.amazonaws.outposts#OutpostIdentifier", + "traits": { + "smithy.api#documentation": "

The ID or ARN of the Outpost.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.outposts#GetOutpostBillingInformationOutput": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.outposts#Token" + }, + "Subscriptions": { + "target": "com.amazonaws.outposts#SubscriptionList", + "traits": { + "smithy.api#documentation": "

The subscription details for the specified Outpost.

" + } + }, + "ContractEndDate": { + "target": "com.amazonaws.outposts#String", + "traits": { + "smithy.api#documentation": "

The date the current contract term ends for the specified Outpost. You must start the renewal or\n decommission process at least 5 business days before the current term for your\n Amazon Web Services Outposts ends. Failing to complete these steps at least 5 business days before the\n current term ends might result in unanticipated charges.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.outposts#GetOutpostInput": { "type": "structure", "members": { "OutpostId": { "target": "com.amazonaws.outposts#OutpostId", "traits": { - "smithy.api#documentation": "

The ID or ARN of the Outpost.

", + "smithy.api#documentation": "

The ID or ARN of the Outpost.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2138,7 +2226,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetIdInput", "traits": { - "smithy.api#documentation": "

The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

", + "smithy.api#documentation": "

The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts\n rack or an Outposts server configuration.

", "smithy.api#httpQuery": "AssetId" } }, @@ -2559,7 +2647,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

" + "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or\n an Outposts server configuration.

" } }, "MacAddressList": { @@ -3092,6 +3180,9 @@ "target": "com.amazonaws.outposts#ListCatalogItemsOutput" }, "errors": [ + { + "target": "com.amazonaws.outposts#AccessDeniedException" + }, { "target": "com.amazonaws.outposts#InternalServerException" }, @@ -3589,6 +3680,9 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.outposts#NullableDouble": { + "type": "double" + }, "com.amazonaws.outposts#OpticalStandard": { "type": "enum", "members": { @@ -3744,6 +3838,12 @@ "smithy.api#pattern": "^oo-[a-f0-9]{17}$" } }, + "com.amazonaws.outposts#OrderIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.outposts#String" + } + }, "com.amazonaws.outposts#OrderStatus": { "type": "enum", "members": { @@ -4060,6 +4160,9 @@ { "target": "com.amazonaws.outposts#GetOutpost" }, + { + "target": "com.amazonaws.outposts#GetOutpostBillingInformation" + }, { "target": "com.amazonaws.outposts#GetOutpostInstanceTypes" }, @@ -5630,7 +5733,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts the specified capacity task. You can have one active capacity task for each order and each Outpost.

", + "smithy.api#documentation": "

Starts the specified capacity task. You can have one active capacity task for each order\n and each Outpost.

", "smithy.api#http": { "method": "POST", "uri": "/outposts/{OutpostIdentifier}/capacity", @@ -5658,7 +5761,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetIdInput", "traits": { - "smithy.api#documentation": "

The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

" + "smithy.api#documentation": "

The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts\n rack or an Outposts server configuration.

" } }, "InstancePools": { @@ -5716,7 +5819,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.

" + "smithy.api#documentation": "

The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an\n Outposts server configuration.

" } }, "RequestedInstancePools": { @@ -5904,6 +6007,114 @@ "smithy.api#pattern": "^[\\S \\n]+$" } }, + "com.amazonaws.outposts#Subscription": { + "type": "structure", + "members": { + "SubscriptionId": { + "target": "com.amazonaws.outposts#String", + "traits": { + "smithy.api#documentation": "

The ID of the subscription that appears on the Amazon Web Services Billing Center console.

" + } + }, + "SubscriptionType": { + "target": "com.amazonaws.outposts#SubscriptionType", + "traits": { + "smithy.api#documentation": "

The type of subscription, which can be one of the following:

\n " + } + }, + "SubscriptionStatus": { + "target": "com.amazonaws.outposts#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription, which can be one of the following:

\n " + } + }, + "OrderIds": { + "target": "com.amazonaws.outposts#OrderIdList", + "traits": { + "smithy.api#documentation": "

The order ID for your subscription.

" + } + }, + "BeginDate": { + "target": "com.amazonaws.outposts#ISO8601Timestamp", + "traits": { + "smithy.api#documentation": "

The date your subscription starts.

" + } + }, + "EndDate": { + "target": "com.amazonaws.outposts#ISO8601Timestamp", + "traits": { + "smithy.api#documentation": "

The date your subscription ends.

" + } + }, + "MonthlyRecurringPrice": { + "target": "com.amazonaws.outposts#NullableDouble", + "traits": { + "smithy.api#documentation": "

The amount you are billed each month in the subscription period.

" + } + }, + "UpfrontPrice": { + "target": "com.amazonaws.outposts#NullableDouble", + "traits": { + "smithy.api#documentation": "

The amount billed when the subscription is created. This is a one-time charge.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information about your Amazon Web Services Outposts subscriptions.

" + } + }, + "com.amazonaws.outposts#SubscriptionList": { + "type": "list", + "member": { + "target": "com.amazonaws.outposts#Subscription" + } + }, + "com.amazonaws.outposts#SubscriptionStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INACTIVE" + } + }, + "CANCELLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLED" + } + } + } + }, + "com.amazonaws.outposts#SubscriptionType": { + "type": "enum", + "members": { + "ORIGINAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ORIGINAL" + } + }, + "RENEWAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RENEWAL" + } + }, + "CAPACITY_INCREASE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CAPACITY_INCREASE" + } + } + } + }, "com.amazonaws.outposts#SupportedHardwareType": { "type": "enum", "members": { @@ -6458,7 +6669,7 @@ "PowerConnector": { "target": "com.amazonaws.outposts#PowerConnector", "traits": { - "smithy.api#documentation": "

The power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.

\n " + "smithy.api#documentation": "

The power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.

\n " } }, "PowerFeedDrop": { diff --git a/aws-models/sdk-endpoints.json b/aws-models/sdk-endpoints.json index 7bd1df098585..cfdd7a63e74f 100644 --- a/aws-models/sdk-endpoints.json +++ b/aws-models/sdk-endpoints.json @@ -22815,6 +22815,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "securityhub.ap-northeast-1.api.aws", diff --git a/aws-models/sesv2.json b/aws-models/sesv2.json index c7acc65a1481..4236669999bc 100644 --- a/aws-models/sesv2.json +++ b/aws-models/sesv2.json @@ -2284,14 +2284,14 @@ "WarmupStatus": { "target": "com.amazonaws.sesv2#WarmupStatus", "traits": { - "smithy.api#documentation": "

The warm-up status of a dedicated IP address. The status can have one of the following\n values:

\n ", + "smithy.api#documentation": "

The warm-up status of a dedicated IP address. The status can have one of the following\n values:

\n ", "smithy.api#required": {} } }, "WarmupPercentage": { "target": "com.amazonaws.sesv2#Percentage100Wrapper", "traits": { - "smithy.api#documentation": "

Indicates how complete the dedicated IP warm-up process is. When this value equals 1,\n the address has completed the warm-up process and is ready for use.

", + "smithy.api#documentation": "

Indicates the progress of your dedicated IP warm-up:

\n ", "smithy.api#required": {} } }, @@ -14070,6 +14070,12 @@ "traits": { "smithy.api#enumValue": "DONE" } + }, + "NOT_APPLICABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_APPLICABLE" + } } }, "traits": { diff --git a/aws-models/ssm.json b/aws-models/ssm.json index aac24c4efb99..b29405f9a26f 100644 --- a/aws-models/ssm.json +++ b/aws-models/ssm.json @@ -949,7 +949,7 @@ "name": "ssm" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure\n end-to-end management solution for hybrid cloud environments that enables safe and secure\n operations at scale.

\n

This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up\n Amazon Web Services Systems Manager.

\n

\n Related resources\n

\n ", + "smithy.api#documentation": "

Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure\n end-to-end management solution for hybrid cloud environments that enables safe and secure\n operations at scale.

\n

This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up\n Amazon Web Services Systems Manager.

\n

\n Related resources\n

\n ", "smithy.api#title": "Amazon Simple Systems Manager (SSM)", "smithy.api#xmlNamespace": { "uri": "http://ssm.amazonaws.com/doc/2014-11-06/" @@ -5328,7 +5328,7 @@ "ExecutionTime": { "target": "com.amazonaws.ssm#DateTime", "traits": { - "smithy.api#documentation": "

The time the execution ran as a datetime object that is saved in the following format:\n yyyy-MM-dd'T'HH:mm:ss'Z'\n

", + "smithy.api#documentation": "

The time the execution ran as a datetime object that is saved in the following format:\n yyyy-MM-dd'T'HH:mm:ss'Z'\n

\n \n

For State Manager associations, this timestamp represents when the compliance status was\n captured and reported by the Systems Manager service, not when the underlying association was actually\n executed on the managed node. To track actual association execution times, use the DescribeAssociationExecutionTargets command or check the association execution\n history in the Systems Manager console.

\n
", "smithy.api#required": {} } }, @@ -5409,7 +5409,7 @@ "ExecutionSummary": { "target": "com.amazonaws.ssm#ComplianceExecutionSummary", "traits": { - "smithy.api#documentation": "

A summary for the compliance item. The summary includes an execution ID, the execution type\n (for example, command), and the execution time.

" + "smithy.api#documentation": "

A summary for the compliance item. The summary includes an execution ID, the execution type\n (for example, command), and the execution time.

\n \n

For State Manager associations, the ExecutionTime value represents when the\n compliance status was captured and aggregated by the Systems Manager service, not necessarily when the\n underlying association was executed on the managed node. State Manager updates compliance status\n for all associations on an instance whenever any association executes, which means multiple\n associations may show the same execution time even if they were executed at different\n times.

\n
" } }, "Details": { @@ -10244,7 +10244,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the parameters in your Amazon Web Services account or the parameters shared with you when you enable\n the Shared option.

\n

Request results are returned on a best-effort basis. If you specify MaxResults\n in the request, the response includes information up to the limit specified. The number of items\n returned, however, can be between zero and the value of MaxResults. If the service\n reaches an internal limit while processing the results, it stops the operation and returns the\n matching values up to that point and a NextToken. You can specify the\n NextToken in a subsequent call to get the next set of results.

\n \n

If you change the KMS key alias for the KMS key used to encrypt a parameter,\n then you must also update the key alias the parameter uses to reference KMS. Otherwise,\n DescribeParameters retrieves whatever the original key alias was\n referencing.

\n
", + "smithy.api#documentation": "

Lists the parameters in your Amazon Web Services account or the parameters shared with you when you enable\n the Shared option.

\n

Request results are returned on a best-effort basis. If you specify MaxResults\n in the request, the response includes information up to the limit specified. The number of items\n returned, however, can be between zero and the value of MaxResults. If the service\n reaches an internal limit while processing the results, it stops the operation and returns the\n matching values up to that point and a NextToken. You can specify the\n NextToken in a subsequent call to get the next set of results.

\n

Parameter names can't contain spaces. The service removes any spaces specified for the\n beginning or end of a parameter name. If the specified name for a parameter contains spaces\n between characters, the request fails with a ValidationException error.

\n \n

If you change the KMS key alias for the KMS key used to encrypt a parameter,\n then you must also update the key alias the parameter uses to reference KMS. Otherwise,\n DescribeParameters retrieves whatever the original key alias was\n referencing.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -14260,7 +14260,7 @@ } ], "traits": { - "smithy.api#documentation": "

Get information about a single parameter by specifying the parameter name.

\n \n

To get information about more than one parameter at a time, use the GetParameters operation.

\n
" + "smithy.api#documentation": "

Get information about a single parameter by specifying the parameter name.

\n

Parameter names can't contain spaces. The service removes any spaces specified for the\n beginning or end of a parameter name. If the specified name for a parameter contains spaces\n between characters, the request fails with a ValidationException error.

\n \n

To get information about more than one parameter at a time, use the GetParameters operation.

\n
" } }, "com.amazonaws.ssm#GetParameterHistory": { @@ -14286,7 +14286,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the history of all changes to a parameter.

\n \n

If you change the KMS key alias for the KMS key used to encrypt a parameter,\n then you must also update the key alias the parameter uses to reference KMS. Otherwise,\n GetParameterHistory retrieves whatever the original key alias was\n referencing.

\n
", + "smithy.api#documentation": "

Retrieves the history of all changes to a parameter.

\n

Parameter names can't contain spaces. The service removes any spaces specified for the\n beginning or end of a parameter name. If the specified name for a parameter contains spaces\n between characters, the request fails with a ValidationException error.

\n \n

If you change the KMS key alias for the KMS key used to encrypt a parameter,\n then you must also update the key alias the parameter uses to reference KMS. Otherwise,\n GetParameterHistory retrieves whatever the original key alias was\n referencing.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -14401,7 +14401,7 @@ } ], "traits": { - "smithy.api#documentation": "

Get information about one or more parameters by specifying multiple parameter names.

\n \n

To get information about a single parameter, you can use the GetParameter\n operation instead.

\n
" + "smithy.api#documentation": "

Get information about one or more parameters by specifying multiple parameter names.

\n \n

To get information about a single parameter, you can use the GetParameter\n operation instead.

\n
\n

Parameter names can't contain spaces. The service removes any spaces specified for the\n beginning or end of a parameter name. If the specified name for a parameter contains spaces\n between characters, the request fails with a ValidationException error.

" } }, "com.amazonaws.ssm#GetParametersByPath": { @@ -14433,7 +14433,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieve information about one or more parameters under a specified level in a hierarchy.

\n

Request results are returned on a best-effort basis. If you specify MaxResults\n in the request, the response includes information up to the limit specified. The number of items\n returned, however, can be between zero and the value of MaxResults. If the service\n reaches an internal limit while processing the results, it stops the operation and returns the\n matching values up to that point and a NextToken. You can specify the\n NextToken in a subsequent call to get the next set of results.

", + "smithy.api#documentation": "

Retrieve information about one or more parameters under a specified level in a hierarchy.

\n

Request results are returned on a best-effort basis. If you specify MaxResults\n in the request, the response includes information up to the limit specified. The number of items\n returned, however, can be between zero and the value of MaxResults. If the service\n reaches an internal limit while processing the results, it stops the operation and returns the\n matching values up to that point and a NextToken. You can specify the\n NextToken in a subsequent call to get the next set of results.

\n

Parameter names can't contain spaces. The service removes any spaces specified for the\n beginning or end of a parameter name. If the specified name for a parameter contains spaces\n between characters, the request fails with a ValidationException error.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -17390,7 +17390,7 @@ "Values": { "target": "com.amazonaws.ssm#InventoryFilterValueList", "traits": { - "smithy.api#documentation": "

Inventory filter values. Example: inventory filter where managed node IDs are specified as\n values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g,\n i-1a2b3c4d5e6,Type=Equal.

", + "smithy.api#documentation": "

Inventory filter values.

", "smithy.api#required": {} } }, @@ -17402,7 +17402,7 @@ } }, "traits": { - "smithy.api#documentation": "

One or more filters. Use a filter to return a more specific list of results.

" + "smithy.api#documentation": "

One or more filters. Use a filter to return a more specific list of results.

\n

\n Example formats for the aws ssm get-inventory\n command:\n

\n

\n --filters\n Key=AWS:InstanceInformation.AgentType,Values=amazon-ssm-agent,Type=Equal\n

\n

\n --filters\n Key=AWS:InstanceInformation.AgentVersion,Values=3.3.2299.0,Type=Equal\n

\n

\n --filters\n Key=AWS:InstanceInformation.ComputerName,Values=ip-192.0.2.0.us-east-2.compute.internal,Type=Equal\n

\n

\n --filters\n Key=AWS:InstanceInformation.InstanceId,Values=i-0a4cd6ceffEXAMPLE,i-1a2b3c4d5e6EXAMPLE,Type=Equal\n

\n

\n --filters\n Key=AWS:InstanceInformation.InstanceStatus,Values=Active,Type=Equal\n

\n

\n --filters\n Key=AWS:InstanceInformation.IpAddress,Values=198.51.100.0,Type=Equal\n

\n

\n --filters Key=AWS:InstanceInformation.PlatformName,Values=\"Amazon\n Linux\",Type=Equal\n

\n

\n --filters\n Key=AWS:InstanceInformation.PlatformType,Values=Linux,Type=Equal\n

\n

\n --filters\n Key=AWS:InstanceInformation.PlatformVersion,Values=2023,Type=BeginWith\n

\n

\n --filters\n Key=AWS:InstanceInformation.ResourceType,Values=EC2Instance,Type=Equal\n

" } }, "com.amazonaws.ssm#InventoryFilterKey": { @@ -17969,7 +17969,7 @@ } ], "traits": { - "smithy.api#documentation": "

A parameter label is a user-defined alias to help you manage different versions of a\n parameter. When you modify a parameter, Amazon Web Services Systems Manager automatically saves a new version and\n increments the version number by one. A label can help you remember the purpose of a parameter\n when there are multiple versions.

\n

Parameter labels have the following requirements and restrictions.

\n " + "smithy.api#documentation": "

A parameter label is a user-defined alias to help you manage different versions of a\n parameter. When you modify a parameter, Amazon Web Services Systems Manager automatically saves a new version and\n increments the version number by one. A label can help you remember the purpose of a parameter\n when there are multiple versions.

\n

Parameter labels have the following requirements and restrictions.

\n " } }, "com.amazonaws.ssm#LabelParameterVersionRequest": { @@ -25507,7 +25507,7 @@ "Configuration": { "target": "com.amazonaws.ssm#PatchSourceConfiguration", "traits": { - "smithy.api#documentation": "

The value of the yum repo configuration. For example:

\n

\n [main]\n

\n

\n name=MyCustomRepository\n

\n

\n baseurl=https://my-custom-repository\n

\n

\n enabled=1\n

\n \n

For information about other options available for your yum repository configuration, see\n dnf.conf(5).

\n
", + "smithy.api#documentation": "

The value of the repo configuration.

\n

\n Example for yum repositories\n

\n

\n [main]\n

\n

\n name=MyCustomRepository\n

\n

\n baseurl=https://my-custom-repository\n

\n

\n enabled=1\n

\n

For information about other options available for your yum repository configuration, see\n dnf.conf(5) on the\n man7.org website.

\n

\n Examples for Ubuntu Server and Debian Server\n

\n

\n deb http://security.ubuntu.com/ubuntu jammy main\n

\n

\n deb https://site.example.com/debian distribution component1 component2 component3\n

\n

Repo information for Ubuntu Server repositories must be specified in a single line. For more\n examples and information, see jammy (5)\n sources.list.5.gz on the Ubuntu Server Manuals website and sources.list format on the\n Debian Wiki.

", "smithy.api#required": {} } } @@ -25794,7 +25794,7 @@ } ], "traits": { - "smithy.api#documentation": "

Registers a compliance type and other compliance details on a designated resource. This\n operation lets you register custom compliance details with a resource. This call overwrites\n existing compliance information on the resource, so you must provide a full list of compliance\n items each time that you send the request.

\n

ComplianceType can be one of the following:

\n " + "smithy.api#documentation": "

Registers a compliance type and other compliance details on a designated resource. This\n operation lets you register custom compliance details with a resource. This call overwrites\n existing compliance information on the resource, so you must provide a full list of compliance\n items each time that you send the request.

\n

ComplianceType can be one of the following:

\n " } }, "com.amazonaws.ssm#PutComplianceItemsRequest": { @@ -26013,7 +26013,7 @@ "Name": { "target": "com.amazonaws.ssm#PSParameterName", "traits": { - "smithy.api#documentation": "

The fully qualified name of the parameter that you want to create or update.

\n \n

You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name\n itself.

\n
\n

The fully qualified name includes the complete hierarchy of the parameter path and name. For\n parameters in a hierarchy, you must include a leading forward slash character (/) when you create\n or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13\n

\n

Naming Constraints:

\n \n

For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

\n \n

The reported maximum length of 2048 characters for a parameter name includes 1037\n characters that are reserved for internal use by Systems Manager. The maximum length for a parameter name\n that you specify is 1011 characters.

\n

This count of 1011 characters includes the characters in the ARN that precede the name you\n specify. This ARN length will vary depending on your partition and Region. For example, the\n following 45 characters count toward the 1011 character maximum for a parameter created in the\n US East (Ohio) Region: arn:aws:ssm:us-east-2:111122223333:parameter/.

\n
", + "smithy.api#documentation": "

The fully qualified name of the parameter that you want to create or update.

\n \n

You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name\n itself.

\n
\n

The fully qualified name includes the complete hierarchy of the parameter path and name. For\n parameters in a hierarchy, you must include a leading forward slash character (/) when you create\n or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13\n

\n

Naming Constraints:

\n \n

For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

\n \n

The reported maximum length of 2048 characters for a parameter name includes 1037\n characters that are reserved for internal use by Systems Manager. The maximum length for a parameter name\n that you specify is 1011 characters.

\n

This count of 1011 characters includes the characters in the ARN that precede the name you\n specify. This ARN length will vary depending on your partition and Region. For example, the\n following 45 characters count toward the 1011 character maximum for a parameter created in the\n US East (Ohio) Region: arn:aws:ssm:us-east-2:111122223333:parameter/.

\n
", "smithy.api#required": {} } }, @@ -28450,7 +28450,7 @@ "AccessType": { "target": "com.amazonaws.ssm#AccessType", "traits": { - "smithy.api#documentation": "

\n Standard access type is the default for Session Manager sessions.\n JustInTime is the access type for Just-in-time node access.

" + "smithy.api#documentation": "

\n Standard access type is the default for Session Manager sessions.\n JustInTime is the access type for Just-in-time node access.

" } } }, @@ -30402,7 +30402,7 @@ } ], "traits": { - "smithy.api#documentation": "

Remove a label or labels from a parameter.

" + "smithy.api#documentation": "

Remove a label or labels from a parameter.

\n

Parameter names can't contain spaces. The service removes any spaces specified for the\n beginning or end of a parameter name. If the specified name for a parameter contains spaces\n between characters, the request fails with a ValidationException error.

" } }, "com.amazonaws.ssm#UnlabelParameterVersionRequest": { diff --git a/examples/cross_service/rest_ses/Cargo.toml b/examples/cross_service/rest_ses/Cargo.toml index 19dfbb669e2c..882ea6766822 100644 --- a/examples/cross_service/rest_ses/Cargo.toml +++ b/examples/cross_service/rest_ses/Cargo.toml @@ -30,7 +30,7 @@ tracing-bunyan-formatter = "0.3.4" tracing-log = "0.1.3" xlsxwriter = "0.6.0" aws-config= { version = "1.8.2", path = "../../../sdk/aws-config" } -aws-sdk-cloudwatchlogs= { version = "1.93.0", path = "../../../sdk/cloudwatchlogs" } +aws-sdk-cloudwatchlogs= { version = "1.94.0", path = "../../../sdk/cloudwatchlogs" } aws-sdk-rdsdata= { version = "1.78.0", path = "../../../sdk/rdsdata" } aws-sdk-ses= { version = "1.79.0", path = "../../../sdk/ses" } aws-smithy-types= { version = "1.3.2", path = "../../../sdk/aws-smithy-types" } diff --git a/examples/examples/cloudwatchlogs/Cargo.toml b/examples/examples/cloudwatchlogs/Cargo.toml index e9e6e8735ed8..f7b73d465116 100644 --- a/examples/examples/cloudwatchlogs/Cargo.toml +++ b/examples/examples/cloudwatchlogs/Cargo.toml @@ -12,7 +12,7 @@ tracing = "0.1.40" async-recursion = "1.0.5" futures = "0.3.30" aws-config= { version = "1.8.2", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } -aws-sdk-cloudwatchlogs= { version = "1.93.0", path = "../../../sdk/cloudwatchlogs", features = ["test-util"] } +aws-sdk-cloudwatchlogs= { version = "1.94.0", path = "../../../sdk/cloudwatchlogs", features = ["test-util"] } aws-types= { version = "1.3.7", path = "../../../sdk/aws-types" } [dependencies.tokio] diff --git a/examples/examples/ec2/Cargo.toml b/examples/examples/ec2/Cargo.toml index 67413e7b3596..aadebee05765 100644 --- a/examples/examples/ec2/Cargo.toml +++ b/examples/examples/ec2/Cargo.toml @@ -12,7 +12,7 @@ mockall = "0.13.0" inquire = "0.7.5" reqwest = "0.12.5" aws-smithy-runtime-api= { version = "1.8.3", path = "../../../sdk/aws-smithy-runtime-api" } -aws-sdk-ssm= { version = "1.85.0", path = "../../../sdk/ssm" } +aws-sdk-ssm= { version = "1.85.1", path = "../../../sdk/ssm" } aws-smithy-async= { version = "1.2.5", path = "../../../sdk/aws-smithy-async" } aws-config= { version = "1.8.2", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } aws-sdk-ec2= { version = "1.148.0", path = "../../../sdk/ec2" } diff --git a/examples/examples/ses/Cargo.toml b/examples/examples/ses/Cargo.toml index 1c3a45554685..4bc0144b9d2c 100644 --- a/examples/examples/ses/Cargo.toml +++ b/examples/examples/ses/Cargo.toml @@ -14,7 +14,7 @@ open = "5.1.2" aws-smithy-http= { version = "0.62.1", path = "../../../sdk/aws-smithy-http" } aws-smithy-mocks-experimental= { version = "0.2.4", path = "../../../sdk/aws-smithy-mocks-experimental" } aws-config= { version = "1.8.2", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } -aws-sdk-sesv2= { version = "1.87.0", path = "../../../sdk/sesv2", features = ["test-util"] } +aws-sdk-sesv2= { version = "1.88.0", path = "../../../sdk/sesv2", features = ["test-util"] } [dependencies.tokio] version = "1.20.1" diff --git a/examples/examples/ssm/Cargo.toml b/examples/examples/ssm/Cargo.toml index 6e129415b1f2..db55dc15c596 100644 --- a/examples/examples/ssm/Cargo.toml +++ b/examples/examples/ssm/Cargo.toml @@ -8,7 +8,7 @@ publish = false [dependencies] aws-config= { version = "1.8.2", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } -aws-sdk-ssm= { version = "1.85.0", path = "../../../sdk/ssm" } +aws-sdk-ssm= { version = "1.85.1", path = 
"../../../sdk/ssm" } [dependencies.tokio] version = "1.20.1" diff --git a/sdk/auditmanager/Cargo.toml b/sdk/auditmanager/Cargo.toml index 50ad37c6c6de..effde2ab192c 100644 --- a/sdk/auditmanager/Cargo.toml +++ b/sdk/auditmanager/Cargo.toml @@ -1,7 +1,7 @@ # Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. [package] name = "aws-sdk-auditmanager" -version = "1.78.0" +version = "1.79.0" authors = ["AWS Rust SDK Team ", "Russell Cohen "] description = "AWS SDK for AWS Audit Manager" edition = "2021" diff --git a/sdk/auditmanager/README.md b/sdk/auditmanager/README.md index 2d31ae612ac3..87612c4daa83 100644 --- a/sdk/auditmanager/README.md +++ b/sdk/auditmanager/README.md @@ -26,7 +26,7 @@ your project, add the following to your **Cargo.toml** file: ```toml [dependencies] aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-auditmanager = "1.78.0" +aws-sdk-auditmanager = "1.79.0" tokio = { version = "1", features = ["full"] } ``` diff --git a/sdk/auditmanager/src/error_meta.rs b/sdk/auditmanager/src/error_meta.rs index 421919067426..2b1063687624 100644 --- a/sdk/auditmanager/src/error_meta.rs +++ b/sdk/auditmanager/src/error_meta.rs @@ -1738,6 +1738,9 @@ impl From { Error::ResourceNotFoundException(inner) } + crate::operation::register_organization_admin_account::RegisterOrganizationAdminAccountError::ThrottlingException(inner) => { + Error::ThrottlingException(inner) + } crate::operation::register_organization_admin_account::RegisterOrganizationAdminAccountError::ValidationException(inner) => { Error::ValidationException(inner) } diff --git a/sdk/auditmanager/src/lib.rs b/sdk/auditmanager/src/lib.rs index 0054acf9f187..de1dae9b0e7a 100644 --- a/sdk/auditmanager/src/lib.rs +++ b/sdk/auditmanager/src/lib.rs @@ -44,7 +44,7 @@ //! ```toml //! [dependencies] //! aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -//! aws-sdk-auditmanager = "1.78.0" +//! aws-sdk-auditmanager = "1.79.0" //! tokio = { version = "1", features = ["full"] } //! ``` //! diff --git a/sdk/auditmanager/src/operation/register_organization_admin_account.rs b/sdk/auditmanager/src/operation/register_organization_admin_account.rs index 79829dc34c35..ae13d00cc07a 100644 --- a/sdk/auditmanager/src/operation/register_organization_admin_account.rs +++ b/sdk/auditmanager/src/operation/register_organization_admin_account.rs @@ -270,6 +270,8 @@ pub enum RegisterOrganizationAdminAccountError { InternalServerException(crate::types::error::InternalServerException), ///

The resource that's specified in the request can't be found.

ResourceNotFoundException(crate::types::error::ResourceNotFoundException), + ///

The request was denied due to request throttling.

+ ThrottlingException(crate::types::error::ThrottlingException), ///

The request has invalid or missing parameters.

ValidationException(crate::types::error::ValidationException), /// An unexpected error occurred (e.g., invalid JSON returned by the service or an unknown error code). @@ -308,6 +310,7 @@ impl RegisterOrganizationAdminAccountError { Self::AccessDeniedException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::InternalServerException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::ResourceNotFoundException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::ThrottlingException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::ValidationException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::Unhandled(e) => &e.meta, } @@ -324,6 +327,10 @@ impl RegisterOrganizationAdminAccountError { pub fn is_resource_not_found_exception(&self) -> bool { matches!(self, Self::ResourceNotFoundException(_)) } + /// Returns `true` if the error kind is `RegisterOrganizationAdminAccountError::ThrottlingException`. + pub fn is_throttling_exception(&self) -> bool { + matches!(self, Self::ThrottlingException(_)) + } /// Returns `true` if the error kind is `RegisterOrganizationAdminAccountError::ValidationException`. pub fn is_validation_exception(&self) -> bool { matches!(self, Self::ValidationException(_)) @@ -335,6 +342,7 @@ impl ::std::error::Error for RegisterOrganizationAdminAccountError { Self::AccessDeniedException(_inner) => ::std::option::Option::Some(_inner), Self::InternalServerException(_inner) => ::std::option::Option::Some(_inner), Self::ResourceNotFoundException(_inner) => ::std::option::Option::Some(_inner), + Self::ThrottlingException(_inner) => ::std::option::Option::Some(_inner), Self::ValidationException(_inner) => ::std::option::Option::Some(_inner), Self::Unhandled(_inner) => ::std::option::Option::Some(&*_inner.source), } @@ -346,6 +354,7 @@ impl ::std::fmt::Display for RegisterOrganizationAdminAccountError { Self::AccessDeniedException(_inner) => _inner.fmt(f), Self::InternalServerException(_inner) => _inner.fmt(f), Self::ResourceNotFoundException(_inner) => _inner.fmt(f), + Self::ThrottlingException(_inner) => _inner.fmt(f), Self::ValidationException(_inner) => _inner.fmt(f), Self::Unhandled(_inner) => { if let ::std::option::Option::Some(code) = ::aws_smithy_types::error::metadata::ProvideErrorMetadata::code(self) { @@ -371,6 +380,7 @@ impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for RegisterOrgan Self::AccessDeniedException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::InternalServerException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::ResourceNotFoundException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::ThrottlingException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::ValidationException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::Unhandled(_inner) => &_inner.meta, } diff --git a/sdk/auditmanager/src/protocol_serde/shape_register_organization_admin_account.rs b/sdk/auditmanager/src/protocol_serde/shape_register_organization_admin_account.rs index 082ae5c5602f..8642fb25bbed 100644 --- a/sdk/auditmanager/src/protocol_serde/shape_register_organization_admin_account.rs +++ b/sdk/auditmanager/src/protocol_serde/shape_register_organization_admin_account.rs @@ -69,6 +69,20 @@ pub fn 
de_register_organization_admin_account_http_error( tmp }) } + "ThrottlingException" => crate::operation::register_organization_admin_account::RegisterOrganizationAdminAccountError::ThrottlingException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::ThrottlingExceptionBuilder::default(); + output = crate::protocol_serde::shape_throttling_exception::de_throttling_exception_json_err(_response_body, output) + .map_err(crate::operation::register_organization_admin_account::RegisterOrganizationAdminAccountError::unhandled)?; + let output = output.meta(generic); + crate::serde_util::throttling_exception_correct_errors(output) + .build() + .map_err(crate::operation::register_organization_admin_account::RegisterOrganizationAdminAccountError::unhandled)? + }; + tmp + }), "ValidationException" => crate::operation::register_organization_admin_account::RegisterOrganizationAdminAccountError::ValidationException({ #[allow(unused_mut)] let mut tmp = { diff --git a/sdk/cloudwatchlogs/Cargo.toml b/sdk/cloudwatchlogs/Cargo.toml index 876aa0f6b8fb..0ea6aea7ee88 100644 --- a/sdk/cloudwatchlogs/Cargo.toml +++ b/sdk/cloudwatchlogs/Cargo.toml @@ -1,7 +1,7 @@ # Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. [package] name = "aws-sdk-cloudwatchlogs" -version = "1.93.0" +version = "1.94.0" authors = ["AWS Rust SDK Team ", "Russell Cohen "] description = "AWS SDK for Amazon CloudWatch Logs" edition = "2021" diff --git a/sdk/cloudwatchlogs/README.md b/sdk/cloudwatchlogs/README.md index a703081f4a05..be15c6f46b8d 100644 --- a/sdk/cloudwatchlogs/README.md +++ b/sdk/cloudwatchlogs/README.md @@ -19,7 +19,7 @@ your project, add the following to your **Cargo.toml** file: ```toml [dependencies] aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-cloudwatchlogs = "1.93.0" +aws-sdk-cloudwatchlogs = "1.94.0" tokio = { version = "1", features = ["full"] } ``` diff --git a/sdk/cloudwatchlogs/src/client.rs b/sdk/cloudwatchlogs/src/client.rs index 697976f6d5d8..16d3b9f79997 100644 --- a/sdk/cloudwatchlogs/src/client.rs +++ b/sdk/cloudwatchlogs/src/client.rs @@ -267,6 +267,8 @@ mod get_log_events; mod get_log_group_fields; +mod get_log_object; + mod get_log_record; mod get_query_results; diff --git a/sdk/cloudwatchlogs/src/client/get_log_object.rs b/sdk/cloudwatchlogs/src/client/get_log_object.rs new file mode 100644 index 000000000000..b5866d127967 --- /dev/null +++ b/sdk/cloudwatchlogs/src/client/get_log_object.rs @@ -0,0 +1,14 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +impl super::Client { + /// Constructs a fluent builder for the [`GetLogObject`](crate::operation::get_log_object::builders::GetLogObjectFluentBuilder) operation. + /// + /// - The fluent builder is configurable: + /// - [`unmask(bool)`](crate::operation::get_log_object::builders::GetLogObjectFluentBuilder::unmask) / [`set_unmask(Option)`](crate::operation::get_log_object::builders::GetLogObjectFluentBuilder::set_unmask):
required: **false**

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.


+ /// - [`log_object_pointer(impl Into)`](crate::operation::get_log_object::builders::GetLogObjectFluentBuilder::log_object_pointer) / [`set_log_object_pointer(Option)`](crate::operation::get_log_object::builders::GetLogObjectFluentBuilder::set_log_object_pointer):
required: **true**

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.


+ /// - On success, responds with [`GetLogObjectOutput`](crate::operation::get_log_object::GetLogObjectOutput) with field(s): + /// - [`field_stream(EventReceiver)`](crate::operation::get_log_object::GetLogObjectOutput::field_stream):

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.

+ /// - On failure, responds with [`SdkError`](crate::operation::get_log_object::GetLogObjectError) + pub fn get_log_object(&self) -> crate::operation::get_log_object::builders::GetLogObjectFluentBuilder { + crate::operation::get_log_object::builders::GetLogObjectFluentBuilder::new(self.handle.clone()) + } +} diff --git a/sdk/cloudwatchlogs/src/client/put_account_policy.rs b/sdk/cloudwatchlogs/src/client/put_account_policy.rs index e649e7faab9e..128633f76f2e 100644 --- a/sdk/cloudwatchlogs/src/client/put_account_policy.rs +++ b/sdk/cloudwatchlogs/src/client/put_account_policy.rs @@ -7,7 +7,7 @@ impl super::Client { /// - [`policy_document(impl Into)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::policy_document) / [`set_policy_document(Option)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::set_policy_document):
required: **true**

Specify the policy, in JSON.

Data protection policy

A data protection policy must include two JSON blocks:

  • The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.

    The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.

  • The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy.

    The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty.

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifer arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters long.

Subscription filter policy

A subscription filter policy can include the following attributes in a JSON block:

  • DestinationArn The ARN of the destination to deliver log events to. Supported destinations are:

    • An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.

    • An Firehose data stream in the same account as the subscription policy, for same-account delivery.

    • A Lambda function in the same account as the subscription policy, for same-account delivery.

    • A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

  • RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.

  • FilterPattern A filter pattern for subscribing to a filtered stream of log events.

  • Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.

Transformer policy

A transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see Processors that you can use.

Field index policy

A field index filter policy can include the following attribute in a JSON block:

  • Fields The array of field indexes to create.

It must contain at least one field index.

The following is an example of an index policy document that creates two indexes, RequestId and TransactionId.

"policyDocument": "{ \"Fields\": \[ \"RequestId\", \"TransactionId\" \] }"


/// - [`policy_type(PolicyType)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::policy_type) / [`set_policy_type(Option)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::set_policy_type):
required: **true**

The type of policy that you're creating or updating.


/// - [`scope(Scope)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::scope) / [`set_scope(Option)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::set_scope):
required: **false**

Currently the only valid value for this parameter is ALL, which specifies that the data protection policy applies to all log groups in the account. If you omit this parameter, the default of ALL is used.


- /// - [`selection_criteria(impl Into)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::selection_criteria) / [`set_selection_criteria(Option)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::set_selection_criteria):
required: **false**

Use this parameter to apply the new policy to a subset of log groups in the account.

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

Using the selectionCriteria parameter with SUBSCRIPTION_FILTER_POLICY is useful to help prevent infinite loops. For more information, see Log recursion prevention.


+ /// - [`selection_criteria(impl Into)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::selection_criteria) / [`set_selection_criteria(Option)`](crate::operation::put_account_policy::builders::PutAccountPolicyFluentBuilder::set_selection_criteria):
required: **false**

Use this parameter to apply the new policy to a subset of log groups in the account.

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

Using the selectionCriteria parameter with SUBSCRIPTION_FILTER_POLICY is useful to help prevent infinite loops. For more information, see Log recursion prevention.


/// - On success, responds with [`PutAccountPolicyOutput`](crate::operation::put_account_policy::PutAccountPolicyOutput) with field(s): /// - [`account_policy(Option)`](crate::operation::put_account_policy::PutAccountPolicyOutput::account_policy):

The account policy that you created.

/// - On failure, responds with [`SdkError`](crate::operation::put_account_policy::PutAccountPolicyError) diff --git a/sdk/cloudwatchlogs/src/error_meta.rs b/sdk/cloudwatchlogs/src/error_meta.rs index e92a59f09884..096b6ffbf113 100644 --- a/sdk/cloudwatchlogs/src/error_meta.rs +++ b/sdk/cloudwatchlogs/src/error_meta.rs @@ -11,6 +11,8 @@ pub enum Error { ///

PutLogEvents actions are now always accepted and never return DataAlreadyAcceptedException regardless of whether a given batch of log events has already been accepted.

/// DataAlreadyAcceptedException(crate::types::error::DataAlreadyAcceptedException), + ///

An internal error occurred during the streaming of log data. This exception is thrown when there's an issue with the internal streaming mechanism used by the GetLogObject operation.

+ InternalStreamingException(crate::types::error::InternalStreamingException), ///

The operation is not valid on the specified resource.

InvalidOperationException(crate::types::error::InvalidOperationException), ///

A parameter is specified incorrectly.

@@ -61,6 +63,7 @@ impl ::std::fmt::Display for Error { Error::AccessDeniedException(inner) => inner.fmt(f), Error::ConflictException(inner) => inner.fmt(f), Error::DataAlreadyAcceptedException(inner) => inner.fmt(f), + Error::InternalStreamingException(inner) => inner.fmt(f), Error::InvalidOperationException(inner) => inner.fmt(f), Error::InvalidParameterException(inner) => inner.fmt(f), Error::InvalidSequenceTokenException(inner) => inner.fmt(f), @@ -101,6 +104,7 @@ impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for Error { Self::AccessDeniedException(inner) => inner.meta(), Self::ConflictException(inner) => inner.meta(), Self::DataAlreadyAcceptedException(inner) => inner.meta(), + Self::InternalStreamingException(inner) => inner.meta(), Self::InvalidOperationException(inner) => inner.meta(), Self::InvalidParameterException(inner) => inner.meta(), Self::InvalidSequenceTokenException(inner) => inner.meta(), @@ -1779,6 +1783,33 @@ impl From for Er } } } +impl From<::aws_smithy_runtime_api::client::result::SdkError> for Error +where + R: Send + Sync + std::fmt::Debug + 'static, +{ + fn from(err: ::aws_smithy_runtime_api::client::result::SdkError) -> Self { + match err { + ::aws_smithy_runtime_api::client::result::SdkError::ServiceError(context) => Self::from(context.into_err()), + _ => Error::Unhandled(crate::error::sealed_unhandled::Unhandled { + meta: ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(&err).clone(), + source: err.into(), + }), + } + } +} +impl From for Error { + fn from(err: crate::operation::get_log_object::GetLogObjectError) -> Self { + match err { + crate::operation::get_log_object::GetLogObjectError::AccessDeniedException(inner) => Error::AccessDeniedException(inner), + crate::operation::get_log_object::GetLogObjectError::InvalidOperationException(inner) => Error::InvalidOperationException(inner), + crate::operation::get_log_object::GetLogObjectError::InvalidParameterException(inner) => Error::InvalidParameterException(inner), + crate::operation::get_log_object::GetLogObjectError::LimitExceededException(inner) => Error::LimitExceededException(inner), + crate::operation::get_log_object::GetLogObjectError::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner), + crate::operation::get_log_object::GetLogObjectError::InternalStreamingException(inner) => Error::InternalStreamingException(inner), + crate::operation::get_log_object::GetLogObjectError::Unhandled(inner) => Error::Unhandled(inner), + } + } +} impl From<::aws_smithy_runtime_api::client::result::SdkError> for Error where R: Send + Sync + std::fmt::Debug + 'static, @@ -2879,6 +2910,28 @@ impl From From<::aws_smithy_runtime_api::client::result::SdkError> for Error +where + R: Send + Sync + std::fmt::Debug + 'static, +{ + fn from(err: ::aws_smithy_runtime_api::client::result::SdkError) -> Self { + match err { + ::aws_smithy_runtime_api::client::result::SdkError::ServiceError(context) => Self::from(context.into_err()), + _ => Error::Unhandled(crate::error::sealed_unhandled::Unhandled { + meta: ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(&err).clone(), + source: err.into(), + }), + } + } +} +impl From for Error { + fn from(err: crate::types::error::GetLogObjectResponseStreamError) -> Self { + match err { + crate::types::error::GetLogObjectResponseStreamError::InternalStreamingException(inner) => Error::InternalStreamingException(inner), + crate::types::error::GetLogObjectResponseStreamError::Unhandled(inner) => Error::Unhandled(inner), + } + } +} 
impl From<::aws_smithy_runtime_api::client::result::SdkError> for Error where R: Send + Sync + std::fmt::Debug + 'static, @@ -2908,6 +2961,7 @@ impl ::std::error::Error for Error { Error::AccessDeniedException(inner) => inner.source(), Error::ConflictException(inner) => inner.source(), Error::DataAlreadyAcceptedException(inner) => inner.source(), + Error::InternalStreamingException(inner) => inner.source(), Error::InvalidOperationException(inner) => inner.source(), Error::InvalidParameterException(inner) => inner.source(), Error::InvalidSequenceTokenException(inner) => inner.source(), @@ -2934,6 +2988,7 @@ impl ::aws_types::request_id::RequestId for Error { Self::AccessDeniedException(e) => e.request_id(), Self::ConflictException(e) => e.request_id(), Self::DataAlreadyAcceptedException(e) => e.request_id(), + Self::InternalStreamingException(e) => e.request_id(), Self::InvalidOperationException(e) => e.request_id(), Self::InvalidParameterException(e) => e.request_id(), Self::InvalidSequenceTokenException(e) => e.request_id(), diff --git a/sdk/cloudwatchlogs/src/event_stream_serde.rs b/sdk/cloudwatchlogs/src/event_stream_serde.rs index e60385f18a1c..dd30a4622e08 100644 --- a/sdk/cloudwatchlogs/src/event_stream_serde.rs +++ b/sdk/cloudwatchlogs/src/event_stream_serde.rs @@ -1,4 +1,72 @@ // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +#[non_exhaustive] +#[derive(Debug)] +pub struct GetLogObjectResponseStreamUnmarshaller; + +impl GetLogObjectResponseStreamUnmarshaller { + pub fn new() -> Self { + GetLogObjectResponseStreamUnmarshaller + } +} +impl ::aws_smithy_eventstream::frame::UnmarshallMessage for GetLogObjectResponseStreamUnmarshaller { + type Output = crate::types::GetLogObjectResponseStream; + type Error = crate::types::error::GetLogObjectResponseStreamError; + fn unmarshall( + &self, + message: &::aws_smithy_types::event_stream::Message, + ) -> std::result::Result<::aws_smithy_eventstream::frame::UnmarshalledMessage, ::aws_smithy_eventstream::error::Error> + { + let response_headers = ::aws_smithy_eventstream::smithy::parse_response_headers(message)?; + match response_headers.message_type.as_str() { + "event" => match response_headers.smithy_type.as_str() { + "fields" => { + let parsed = crate::protocol_serde::shape_fields_data::de_fields_data_payload(&message.payload()[..]) + .map_err(|err| ::aws_smithy_eventstream::error::Error::unmarshalling(format!("failed to unmarshall Fields: {}", err)))?; + Ok(::aws_smithy_eventstream::frame::UnmarshalledMessage::Event( + crate::types::GetLogObjectResponseStream::Fields(parsed), + )) + } + _unknown_variant => Ok(::aws_smithy_eventstream::frame::UnmarshalledMessage::Event( + crate::types::GetLogObjectResponseStream::Unknown, + )), + }, + "exception" => { + let generic = match crate::protocol_serde::parse_event_stream_error_metadata(message.payload()) { + Ok(builder) => builder.build(), + Err(err) => { + return Ok(::aws_smithy_eventstream::frame::UnmarshalledMessage::Error( + crate::types::error::GetLogObjectResponseStreamError::unhandled(err), + )) + } + }; + if response_headers.smithy_type.as_str() == "InternalStreamingException" { + let mut builder = crate::types::error::builders::InternalStreamingExceptionBuilder::default(); + builder = crate::protocol_serde::shape_internal_streaming_exception::de_internal_streaming_exception_json_err( + &message.payload()[..], + builder, + ) + .map_err(|err| { + ::aws_smithy_eventstream::error::Error::unmarshalling(format!("failed to unmarshall InternalStreamingException: 
{}", err)) + })?; + builder.set_meta(Some(generic)); + return Ok(::aws_smithy_eventstream::frame::UnmarshalledMessage::Error( + crate::types::error::GetLogObjectResponseStreamError::InternalStreamingException(builder.build()), + )); + } + Ok(::aws_smithy_eventstream::frame::UnmarshalledMessage::Error( + crate::types::error::GetLogObjectResponseStreamError::generic(generic), + )) + } + value => { + return Err(::aws_smithy_eventstream::error::Error::unmarshalling(format!( + "unrecognized :message-type: {}", + value + ))); + } + } + } +} + #[non_exhaustive] #[derive(Debug)] pub struct StartLiveTailResponseStreamUnmarshaller; diff --git a/sdk/cloudwatchlogs/src/lib.rs b/sdk/cloudwatchlogs/src/lib.rs index bbf88e3651a6..b6883eb9d06d 100644 --- a/sdk/cloudwatchlogs/src/lib.rs +++ b/sdk/cloudwatchlogs/src/lib.rs @@ -37,7 +37,7 @@ //! ```toml //! [dependencies] //! aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -//! aws-sdk-cloudwatchlogs = "1.93.0" +//! aws-sdk-cloudwatchlogs = "1.94.0" //! tokio = { version = "1", features = ["full"] } //! ``` //! diff --git a/sdk/cloudwatchlogs/src/operation.rs b/sdk/cloudwatchlogs/src/operation.rs index a9edf30a0e3b..7b5f573f2f85 100644 --- a/sdk/cloudwatchlogs/src/operation.rs +++ b/sdk/cloudwatchlogs/src/operation.rs @@ -157,6 +157,9 @@ pub mod get_log_events; /// Types for the `GetLogGroupFields` operation. pub mod get_log_group_fields; +/// Types for the `GetLogObject` operation. +pub mod get_log_object; + /// Types for the `GetLogRecord` operation. pub mod get_log_record; diff --git a/sdk/cloudwatchlogs/src/operation/get_log_object.rs b/sdk/cloudwatchlogs/src/operation/get_log_object.rs new file mode 100644 index 000000000000..f3d6ded8cd11 --- /dev/null +++ b/sdk/cloudwatchlogs/src/operation/get_log_object.rs @@ -0,0 +1,436 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +/// Orchestration and serialization glue logic for `GetLogObject`. 
+#[derive(::std::clone::Clone, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct GetLogObject; +impl GetLogObject { + /// Creates a new `GetLogObject` + pub fn new() -> Self { + Self + } + pub(crate) async fn orchestrate( + runtime_plugins: &::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins, + input: crate::operation::get_log_object::GetLogObjectInput, + ) -> ::std::result::Result< + crate::operation::get_log_object::GetLogObjectOutput, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_log_object::GetLogObjectError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let map_err = |err: ::aws_smithy_runtime_api::client::result::SdkError< + ::aws_smithy_runtime_api::client::interceptors::context::Error, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >| { + err.map_service_error(|err| { + err.downcast::() + .expect("correct error type") + }) + }; + let context = Self::orchestrate_with_stop_point(runtime_plugins, input, ::aws_smithy_runtime::client::orchestrator::StopPoint::None) + .await + .map_err(map_err)?; + let output = context.finalize().map_err(map_err)?; + ::std::result::Result::Ok( + output + .downcast::() + .expect("correct output type"), + ) + } + + pub(crate) async fn orchestrate_with_stop_point( + runtime_plugins: &::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins, + input: crate::operation::get_log_object::GetLogObjectInput, + stop_point: ::aws_smithy_runtime::client::orchestrator::StopPoint, + ) -> ::std::result::Result< + ::aws_smithy_runtime_api::client::interceptors::context::InterceptorContext, + ::aws_smithy_runtime_api::client::result::SdkError< + ::aws_smithy_runtime_api::client::interceptors::context::Error, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let input = ::aws_smithy_runtime_api::client::interceptors::context::Input::erase(input); + use ::tracing::Instrument; + ::aws_smithy_runtime::client::orchestrator::invoke_with_stop_point("CloudWatch Logs", "GetLogObject", input, runtime_plugins, stop_point) + // Create a parent span for the entire operation. Includes a random, internal-only, + // seven-digit ID for the operation orchestration so that it can be correlated in the logs. 
+ .instrument(::tracing::debug_span!( + "CloudWatch Logs.GetLogObject", + "rpc.service" = "CloudWatch Logs", + "rpc.method" = "GetLogObject", + "sdk_invocation_id" = ::fastrand::u32(1_000_000..10_000_000), + "rpc.system" = "aws-api", + )) + .await + } + + pub(crate) fn operation_runtime_plugins( + client_runtime_plugins: ::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins, + client_config: &crate::config::Config, + config_override: ::std::option::Option, + ) -> ::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins { + let mut runtime_plugins = client_runtime_plugins.with_operation_plugin(Self::new()); + + if let ::std::option::Option::Some(config_override) = config_override { + for plugin in config_override.runtime_plugins.iter().cloned() { + runtime_plugins = runtime_plugins.with_operation_plugin(plugin); + } + runtime_plugins = runtime_plugins.with_operation_plugin(crate::config::ConfigOverrideRuntimePlugin::new( + config_override, + client_config.config.clone(), + &client_config.runtime_components, + )); + } + runtime_plugins + } +} +impl ::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugin for GetLogObject { + fn config(&self) -> ::std::option::Option<::aws_smithy_types::config_bag::FrozenLayer> { + let mut cfg = ::aws_smithy_types::config_bag::Layer::new("GetLogObject"); + + cfg.store_put(::aws_smithy_runtime_api::client::ser_de::SharedRequestSerializer::new( + GetLogObjectRequestSerializer, + )); + cfg.store_put(::aws_smithy_runtime_api::client::ser_de::SharedResponseDeserializer::new( + GetLogObjectResponseDeserializer, + )); + + cfg.store_put(::aws_smithy_runtime_api::client::auth::AuthSchemeOptionResolverParams::new( + crate::config::auth::Params::builder() + .operation_name("GetLogObject") + .build() + .expect("required fields set"), + )); + + cfg.store_put(::aws_smithy_runtime_api::client::orchestrator::Metadata::new( + "GetLogObject", + "CloudWatch Logs", + )); + let mut signing_options = ::aws_runtime::auth::SigningOptions::default(); + signing_options.double_uri_encode = true; + signing_options.content_sha256_header = false; + signing_options.normalize_uri_path = true; + signing_options.payload_override = None; + + cfg.store_put(::aws_runtime::auth::SigV4OperationSigningConfig { + signing_options, + ..::std::default::Default::default() + }); + + ::std::option::Option::Some(cfg.freeze()) + } + + fn runtime_components( + &self, + _: &::aws_smithy_runtime_api::client::runtime_components::RuntimeComponentsBuilder, + ) -> ::std::borrow::Cow<'_, ::aws_smithy_runtime_api::client::runtime_components::RuntimeComponentsBuilder> { + #[allow(unused_mut)] + let mut rcb = ::aws_smithy_runtime_api::client::runtime_components::RuntimeComponentsBuilder::new("GetLogObject") + .with_interceptor(GetLogObjectEndpointParamsInterceptor) + .with_retry_classifier(::aws_smithy_runtime::client::retries::classifiers::TransientErrorClassifier::< + crate::operation::get_log_object::GetLogObjectError, + >::new()) + .with_retry_classifier(::aws_smithy_runtime::client::retries::classifiers::ModeledAsRetryableClassifier::< + crate::operation::get_log_object::GetLogObjectError, + >::new()) + .with_retry_classifier(::aws_runtime::retries::classifiers::AwsErrorCodeClassifier::< + crate::operation::get_log_object::GetLogObjectError, + >::new()); + + ::std::borrow::Cow::Owned(rcb) + } +} + +#[derive(Debug)] +struct GetLogObjectResponseDeserializer; +impl ::aws_smithy_runtime_api::client::ser_de::DeserializeResponse for GetLogObjectResponseDeserializer { + fn 
deserialize_streaming( + &self, + response: &mut ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + ) -> ::std::option::Option<::aws_smithy_runtime_api::client::interceptors::context::OutputOrError> { + #[allow(unused_mut)] + let mut force_error = false; + ::tracing::debug!(request_id = ?::aws_types::request_id::RequestId::request_id(response)); + + // If this is an error, defer to the non-streaming parser + if (!response.status().is_success() && response.status().as_u16() != 200) || force_error { + return ::std::option::Option::None; + } + ::std::option::Option::Some(crate::protocol_serde::type_erase_result( + crate::protocol_serde::shape_get_log_object::de_get_log_object_http_response(response), + )) + } + + fn deserialize_nonstreaming( + &self, + response: &::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + ) -> ::aws_smithy_runtime_api::client::interceptors::context::OutputOrError { + // For streaming operations, we only hit this case if its an error + let body = response.body().bytes().expect("body loaded"); + crate::protocol_serde::type_erase_result(crate::protocol_serde::shape_get_log_object::de_get_log_object_http_error( + response.status().as_u16(), + response.headers(), + body, + )) + } +} +#[derive(Debug)] +struct GetLogObjectRequestSerializer; +impl ::aws_smithy_runtime_api::client::ser_de::SerializeRequest for GetLogObjectRequestSerializer { + #[allow(unused_mut, clippy::let_and_return, clippy::needless_borrow, clippy::useless_conversion)] + fn serialize_input( + &self, + input: ::aws_smithy_runtime_api::client::interceptors::context::Input, + _cfg: &mut ::aws_smithy_types::config_bag::ConfigBag, + ) -> ::std::result::Result<::aws_smithy_runtime_api::client::orchestrator::HttpRequest, ::aws_smithy_runtime_api::box_error::BoxError> { + let input = input + .downcast::() + .expect("correct type"); + let _header_serialization_settings = _cfg + .load::() + .cloned() + .unwrap_or_default(); + let mut request_builder = { + fn uri_base( + _input: &crate::operation::get_log_object::GetLogObjectInput, + output: &mut ::std::string::String, + ) -> ::std::result::Result<(), ::aws_smithy_types::error::operation::BuildError> { + use ::std::fmt::Write as _; + ::std::write!(output, "/").expect("formatting should succeed"); + ::std::result::Result::Ok(()) + } + #[allow(clippy::unnecessary_wraps)] + fn update_http_builder( + input: &crate::operation::get_log_object::GetLogObjectInput, + builder: ::http::request::Builder, + ) -> ::std::result::Result<::http::request::Builder, ::aws_smithy_types::error::operation::BuildError> { + let mut uri = ::std::string::String::new(); + uri_base(input, &mut uri)?; + ::std::result::Result::Ok(builder.method("POST").uri(uri)) + } + let mut builder = update_http_builder(&input, ::http::request::Builder::new())?; + builder = _header_serialization_settings.set_default_header(builder, ::http::header::CONTENT_TYPE, "application/x-amz-json-1.1"); + builder = _header_serialization_settings.set_default_header( + builder, + ::http::header::HeaderName::from_static("x-amz-target"), + "Logs_20140328.GetLogObject", + ); + builder + }; + let body = ::aws_smithy_types::body::SdkBody::from(crate::protocol_serde::shape_get_log_object::ser_get_log_object_input(&input)?); + if let Some(content_length) = body.content_length() { + let content_length = content_length.to_string(); + request_builder = _header_serialization_settings.set_default_header(request_builder, ::http::header::CONTENT_LENGTH, &content_length); + } + 
::std::result::Result::Ok(request_builder.body(body).expect("valid request").try_into().unwrap()) + } +} +#[derive(Debug)] +struct GetLogObjectEndpointParamsInterceptor; + +impl ::aws_smithy_runtime_api::client::interceptors::Intercept for GetLogObjectEndpointParamsInterceptor { + fn name(&self) -> &'static str { + "GetLogObjectEndpointParamsInterceptor" + } + + fn read_before_execution( + &self, + context: &::aws_smithy_runtime_api::client::interceptors::context::BeforeSerializationInterceptorContextRef< + '_, + ::aws_smithy_runtime_api::client::interceptors::context::Input, + ::aws_smithy_runtime_api::client::interceptors::context::Output, + ::aws_smithy_runtime_api::client::interceptors::context::Error, + >, + cfg: &mut ::aws_smithy_types::config_bag::ConfigBag, + ) -> ::std::result::Result<(), ::aws_smithy_runtime_api::box_error::BoxError> { + let _input = context + .input() + .downcast_ref::() + .ok_or("failed to downcast to GetLogObjectInput")?; + + let endpoint_prefix = ::aws_smithy_runtime_api::client::endpoint::EndpointPrefix::new("streaming-").map_err(|err| { + ::aws_smithy_runtime_api::client::interceptors::error::ContextAttachedError::new("endpoint prefix could not be built", err) + })?; + cfg.interceptor_state().store_put(endpoint_prefix); + + let params = crate::config::endpoint::Params::builder() + .set_region(cfg.load::<::aws_types::region::Region>().map(|r| r.as_ref().to_owned())) + .set_use_dual_stack(cfg.load::<::aws_types::endpoint_config::UseDualStack>().map(|ty| ty.0)) + .set_use_fips(cfg.load::<::aws_types::endpoint_config::UseFips>().map(|ty| ty.0)) + .set_endpoint(cfg.load::<::aws_types::endpoint_config::EndpointUrl>().map(|ty| ty.0.clone())) + .build() + .map_err(|err| { + ::aws_smithy_runtime_api::client::interceptors::error::ContextAttachedError::new("endpoint params could not be built", err) + })?; + cfg.interceptor_state() + .store_put(::aws_smithy_runtime_api::client::endpoint::EndpointResolverParams::new(params)); + ::std::result::Result::Ok(()) + } +} + +// The get_* functions below are generated from JMESPath expressions in the +// operationContextParams trait. They target the operation's input shape. + +/// Error type for the `GetLogObjectError` operation. +#[non_exhaustive] +#[derive(::std::fmt::Debug)] +pub enum GetLogObjectError { + ///

You don't have sufficient permissions to perform this action.

+ AccessDeniedException(crate::types::error::AccessDeniedException), + ///

The operation is not valid on the specified resource.

+ InvalidOperationException(crate::types::error::InvalidOperationException), + ///

A parameter is specified incorrectly.

+ InvalidParameterException(crate::types::error::InvalidParameterException), + ///

You have reached the maximum number of resources that can be created.

+ LimitExceededException(crate::types::error::LimitExceededException), + ///

The specified resource does not exist.

+ ResourceNotFoundException(crate::types::error::ResourceNotFoundException), + ///

An internal error occurred during the streaming of log data. This exception is thrown when there's an issue with the internal streaming mechanism used by the GetLogObject operation.

+ InternalStreamingException(crate::types::error::InternalStreamingException), + /// An unexpected error occurred (e.g., invalid JSON returned by the service or an unknown error code). + #[deprecated(note = "Matching `Unhandled` directly is not forwards compatible. Instead, match using a \ + variable wildcard pattern and check `.code()`: + \ +    `err if err.code() == Some(\"SpecificExceptionCode\") => { /* handle the error */ }` + \ + See [`ProvideErrorMetadata`](#impl-ProvideErrorMetadata-for-GetLogObjectError) for what information is available for the error.")] + Unhandled(crate::error::sealed_unhandled::Unhandled), +} +impl GetLogObjectError { + /// Creates the `GetLogObjectError::Unhandled` variant from any error type. + pub fn unhandled( + err: impl ::std::convert::Into<::std::boxed::Box>, + ) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source: err.into(), + meta: ::std::default::Default::default(), + }) + } + + /// Creates the `GetLogObjectError::Unhandled` variant from an [`ErrorMetadata`](::aws_smithy_types::error::ErrorMetadata). + pub fn generic(err: ::aws_smithy_types::error::ErrorMetadata) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source: err.clone().into(), + meta: err, + }) + } + /// + /// Returns error metadata, which includes the error code, message, + /// request ID, and potentially additional information. + /// + pub fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { + match self { + Self::AccessDeniedException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::InvalidOperationException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::InvalidParameterException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::LimitExceededException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::ResourceNotFoundException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::InternalStreamingException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::Unhandled(e) => &e.meta, + } + } + /// Returns `true` if the error kind is `GetLogObjectError::AccessDeniedException`. + pub fn is_access_denied_exception(&self) -> bool { + matches!(self, Self::AccessDeniedException(_)) + } + /// Returns `true` if the error kind is `GetLogObjectError::InvalidOperationException`. + pub fn is_invalid_operation_exception(&self) -> bool { + matches!(self, Self::InvalidOperationException(_)) + } + /// Returns `true` if the error kind is `GetLogObjectError::InvalidParameterException`. + pub fn is_invalid_parameter_exception(&self) -> bool { + matches!(self, Self::InvalidParameterException(_)) + } + /// Returns `true` if the error kind is `GetLogObjectError::LimitExceededException`. + pub fn is_limit_exceeded_exception(&self) -> bool { + matches!(self, Self::LimitExceededException(_)) + } + /// Returns `true` if the error kind is `GetLogObjectError::ResourceNotFoundException`. + pub fn is_resource_not_found_exception(&self) -> bool { + matches!(self, Self::ResourceNotFoundException(_)) + } + /// Returns `true` if the error kind is `GetLogObjectError::InternalStreamingException`. 
+ pub fn is_internal_streaming_exception(&self) -> bool { + matches!(self, Self::InternalStreamingException(_)) + } +} +impl ::std::error::Error for GetLogObjectError { + fn source(&self) -> ::std::option::Option<&(dyn ::std::error::Error + 'static)> { + match self { + Self::AccessDeniedException(_inner) => ::std::option::Option::Some(_inner), + Self::InvalidOperationException(_inner) => ::std::option::Option::Some(_inner), + Self::InvalidParameterException(_inner) => ::std::option::Option::Some(_inner), + Self::LimitExceededException(_inner) => ::std::option::Option::Some(_inner), + Self::ResourceNotFoundException(_inner) => ::std::option::Option::Some(_inner), + Self::InternalStreamingException(_inner) => ::std::option::Option::Some(_inner), + Self::Unhandled(_inner) => ::std::option::Option::Some(&*_inner.source), + } + } +} +impl ::std::fmt::Display for GetLogObjectError { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match self { + Self::AccessDeniedException(_inner) => _inner.fmt(f), + Self::InvalidOperationException(_inner) => _inner.fmt(f), + Self::InvalidParameterException(_inner) => _inner.fmt(f), + Self::LimitExceededException(_inner) => _inner.fmt(f), + Self::ResourceNotFoundException(_inner) => _inner.fmt(f), + Self::InternalStreamingException(_inner) => _inner.fmt(f), + Self::Unhandled(_inner) => { + if let ::std::option::Option::Some(code) = ::aws_smithy_types::error::metadata::ProvideErrorMetadata::code(self) { + write!(f, "unhandled error ({code})") + } else { + f.write_str("unhandled error") + } + } + } + } +} +impl ::aws_smithy_types::retry::ProvideErrorKind for GetLogObjectError { + fn code(&self) -> ::std::option::Option<&str> { + ::aws_smithy_types::error::metadata::ProvideErrorMetadata::code(self) + } + fn retryable_error_kind(&self) -> ::std::option::Option<::aws_smithy_types::retry::ErrorKind> { + ::std::option::Option::None + } +} +impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for GetLogObjectError { + fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { + match self { + Self::AccessDeniedException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::InvalidOperationException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::InvalidParameterException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::LimitExceededException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::ResourceNotFoundException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::InternalStreamingException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::Unhandled(_inner) => &_inner.meta, + } + } +} +impl ::aws_smithy_runtime_api::client::result::CreateUnhandledError for GetLogObjectError { + fn create_unhandled_error( + source: ::std::boxed::Box, + meta: ::std::option::Option<::aws_smithy_types::error::ErrorMetadata>, + ) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source, + meta: meta.unwrap_or_default(), + }) + } +} +impl ::aws_types::request_id::RequestId for crate::operation::get_log_object::GetLogObjectError { + fn request_id(&self) -> Option<&str> { + self.meta().request_id() + } +} + +pub use crate::operation::get_log_object::_get_log_object_output::GetLogObjectOutput; + +pub use crate::operation::get_log_object::_get_log_object_input::GetLogObjectInput; + 
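As a companion to the crate-level conversion, the generated `is_*` predicates and the error metadata can be used directly on the operation error. A small illustrative sketch (the classification strings are made up; only the helper and trait calls come from the generated code):

```rust
use aws_sdk_cloudwatchlogs::error::ProvideErrorMetadata;
use aws_sdk_cloudwatchlogs::operation::get_log_object::GetLogObjectError;

// Sketch: classify a `GetLogObjectError` with the generated predicate helpers,
// falling back to the error code carried in the error metadata.
fn classify(err: &GetLogObjectError) -> String {
    if err.is_internal_streaming_exception() {
        "internal streaming failure".to_string()
    } else if err.is_resource_not_found_exception() {
        "log object pointer did not resolve".to_string()
    } else {
        format!("other service error: {}", err.code().unwrap_or("unknown"))
    }
}
```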
+mod _get_log_object_input; + +mod _get_log_object_output; + +/// Builders +pub mod builders; diff --git a/sdk/cloudwatchlogs/src/operation/get_log_object/_get_log_object_input.rs b/sdk/cloudwatchlogs/src/operation/get_log_object/_get_log_object_input.rs new file mode 100644 index 000000000000..ae8bcd0b6166 --- /dev/null +++ b/sdk/cloudwatchlogs/src/operation/get_log_object/_get_log_object_input.rs @@ -0,0 +1,75 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +///

The parameters for the GetLogObject operation.

+#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub struct GetLogObjectInput { + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub unmask: ::std::option::Option, + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ pub log_object_pointer: ::std::option::Option<::std::string::String>, +} +impl GetLogObjectInput { + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub fn unmask(&self) -> ::std::option::Option { + self.unmask + } + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ pub fn log_object_pointer(&self) -> ::std::option::Option<&str> { + self.log_object_pointer.as_deref() + } +} +impl GetLogObjectInput { + /// Creates a new builder-style object to manufacture [`GetLogObjectInput`](crate::operation::get_log_object::GetLogObjectInput). + pub fn builder() -> crate::operation::get_log_object::builders::GetLogObjectInputBuilder { + crate::operation::get_log_object::builders::GetLogObjectInputBuilder::default() + } +} + +/// A builder for [`GetLogObjectInput`](crate::operation::get_log_object::GetLogObjectInput). +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct GetLogObjectInputBuilder { + pub(crate) unmask: ::std::option::Option, + pub(crate) log_object_pointer: ::std::option::Option<::std::string::String>, +} +impl GetLogObjectInputBuilder { + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub fn unmask(mut self, input: bool) -> Self { + self.unmask = ::std::option::Option::Some(input); + self + } + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub fn set_unmask(mut self, input: ::std::option::Option) -> Self { + self.unmask = input; + self + } + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub fn get_unmask(&self) -> &::std::option::Option { + &self.unmask + } + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ /// This field is required. + pub fn log_object_pointer(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.log_object_pointer = ::std::option::Option::Some(input.into()); + self + } + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ pub fn set_log_object_pointer(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.log_object_pointer = input; + self + } + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ pub fn get_log_object_pointer(&self) -> &::std::option::Option<::std::string::String> { + &self.log_object_pointer + } + /// Consumes the builder and constructs a [`GetLogObjectInput`](crate::operation::get_log_object::GetLogObjectInput). + pub fn build( + self, + ) -> ::std::result::Result { + ::std::result::Result::Ok(crate::operation::get_log_object::GetLogObjectInput { + unmask: self.unmask, + log_object_pointer: self.log_object_pointer, + }) + } +} diff --git a/sdk/cloudwatchlogs/src/operation/get_log_object/_get_log_object_output.rs b/sdk/cloudwatchlogs/src/operation/get_log_object/_get_log_object_output.rs new file mode 100644 index 000000000000..e62395605cb1 --- /dev/null +++ b/sdk/cloudwatchlogs/src/operation/get_log_object/_get_log_object_output.rs @@ -0,0 +1,97 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +///

The response from the GetLogObject operation.

+#[non_exhaustive] +#[derive(::std::fmt::Debug)] +pub struct GetLogObjectOutput { + ///

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.

+ pub field_stream: + crate::event_receiver::EventReceiver, + _request_id: Option, +} +impl GetLogObjectOutput { + ///

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.

+ pub fn field_stream( + &self, + ) -> &crate::event_receiver::EventReceiver { + &self.field_stream + } +} +impl ::aws_types::request_id::RequestId for GetLogObjectOutput { + fn request_id(&self) -> Option<&str> { + self._request_id.as_deref() + } +} +impl GetLogObjectOutput { + /// Creates a new builder-style object to manufacture [`GetLogObjectOutput`](crate::operation::get_log_object::GetLogObjectOutput). + pub fn builder() -> crate::operation::get_log_object::builders::GetLogObjectOutputBuilder { + crate::operation::get_log_object::builders::GetLogObjectOutputBuilder::default() + } + #[allow(unused)] + pub(crate) fn into_builder(self) -> crate::operation::get_log_object::builders::GetLogObjectOutputBuilder { + Self::builder().field_stream(self.field_stream) + } +} + +/// A builder for [`GetLogObjectOutput`](crate::operation::get_log_object::GetLogObjectOutput). +#[derive(::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct GetLogObjectOutputBuilder { + pub(crate) field_stream: ::std::option::Option< + crate::event_receiver::EventReceiver, + >, + _request_id: Option, +} +impl GetLogObjectOutputBuilder { + ///

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.

+ pub fn field_stream( + mut self, + input: crate::event_receiver::EventReceiver, + ) -> Self { + self.field_stream = ::std::option::Option::Some(input); + self + } + ///

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.

+ pub fn set_field_stream( + mut self, + input: ::std::option::Option< + crate::event_receiver::EventReceiver, + >, + ) -> Self { + self.field_stream = input; + self + } + ///

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.

+ pub fn get_field_stream( + &self, + ) -> &::std::option::Option< + crate::event_receiver::EventReceiver, + > { + &self.field_stream + } + pub(crate) fn _request_id(mut self, request_id: impl Into) -> Self { + self._request_id = Some(request_id.into()); + self + } + + pub(crate) fn _set_request_id(&mut self, request_id: Option) -> &mut Self { + self._request_id = request_id; + self + } + /// Consumes the builder and constructs a [`GetLogObjectOutput`](crate::operation::get_log_object::GetLogObjectOutput). + /// This method will fail if any of the following fields are not set: + /// - [`field_stream`](crate::operation::get_log_object::builders::GetLogObjectOutputBuilder::field_stream) + pub fn build( + self, + ) -> ::std::result::Result { + ::std::result::Result::Ok(crate::operation::get_log_object::GetLogObjectOutput { + field_stream: self.field_stream.ok_or_else(|| { + ::aws_smithy_types::error::operation::BuildError::missing_field( + "field_stream", + "field_stream was not specified but it is required when building GetLogObjectOutput", + ) + })?, + _request_id: self._request_id, + }) + } +} diff --git a/sdk/cloudwatchlogs/src/operation/get_log_object/builders.rs b/sdk/cloudwatchlogs/src/operation/get_log_object/builders.rs new file mode 100644 index 000000000000..10ef6e6e2fec --- /dev/null +++ b/sdk/cloudwatchlogs/src/operation/get_log_object/builders.rs @@ -0,0 +1,175 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub use crate::operation::get_log_object::_get_log_object_output::GetLogObjectOutputBuilder; + +pub use crate::operation::get_log_object::_get_log_object_input::GetLogObjectInputBuilder; + +impl crate::operation::get_log_object::builders::GetLogObjectInputBuilder { + /// Sends a request with this input using the given client. + pub async fn send_with( + self, + client: &crate::Client, + ) -> ::std::result::Result< + crate::operation::get_log_object::GetLogObjectOutput, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_log_object::GetLogObjectError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let mut fluent_builder = client.get_log_object(); + fluent_builder.inner = self; + fluent_builder.send().await + } +} +/// Fluent builder constructing a request to `GetLogObject`. +/// +///

Retrieves a large logging object (LLO) and streams it back. This API is used to fetch the content of large portions of log events that have been ingested through the PutOpenTelemetryLogs API. When log events contain fields that would cause the total event size to exceed 1MB, CloudWatch Logs automatically processes up to 10 fields, starting with the largest fields. Each field is truncated as needed to keep the total event size as close to 1MB as possible. The excess portions are stored as Large Log Objects (LLOs); these fields are processed separately, and LLO reference system fields (in the format @ptr.$\[path.to.field\]) are added. The path in the reference field reflects the original JSON structure where the large field was located. For example, this could be @ptr.$\['input'\]\['message'\], @ptr.$\['AAA'\]\['BBB'\]\['CCC'\]\['DDD'\], @ptr.$\['AAA'\], or any other path matching your log structure.
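Since the output carries an event stream, a typical call site sends the request and then drains `field_stream`. A minimal sketch, assuming a configured client and a pointer obtained from an earlier query (printing the `FieldsData` debug output is purely illustrative):

```rust
use aws_sdk_cloudwatchlogs::types::GetLogObjectResponseStream;
use aws_sdk_cloudwatchlogs::Client;

async fn print_log_object(
    client: &Client,
    pointer: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut output = client
        .get_log_object()
        .log_object_pointer(pointer)
        .unmask(true) // return masked data in its original form
        .send()
        .await?;

    // Drain the event stream; `recv` yields `None` once the stream ends.
    while let Some(event) = output.field_stream.recv().await? {
        match event {
            GetLogObjectResponseStream::Fields(fields) => {
                println!("fields event: {fields:?}");
            }
            // The enum is non-exhaustive; ignore variants added later.
            _ => {}
        }
    }
    Ok(())
}
```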

+/// +/// [`GetLogObjectOutput`](crate::operation::get_log_object::GetLogObjectOutput) contains an event stream field as well as one or more non-event stream fields. +/// Due to its current implementation, the non-event stream fields are not fully deserialized +/// until the [`send`](Self::send) method completes. As a result, accessing these fields of the operation +/// output struct within an interceptor may return uninitialized values. +/// +#[derive(::std::clone::Clone, ::std::fmt::Debug)] +pub struct GetLogObjectFluentBuilder { + handle: ::std::sync::Arc, + inner: crate::operation::get_log_object::builders::GetLogObjectInputBuilder, + config_override: ::std::option::Option, +} +impl + crate::client::customize::internal::CustomizableSend< + crate::operation::get_log_object::GetLogObjectOutput, + crate::operation::get_log_object::GetLogObjectError, + > for GetLogObjectFluentBuilder +{ + fn send( + self, + config_override: crate::config::Builder, + ) -> crate::client::customize::internal::BoxFuture< + crate::client::customize::internal::SendResult< + crate::operation::get_log_object::GetLogObjectOutput, + crate::operation::get_log_object::GetLogObjectError, + >, + > { + ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await }) + } +} +impl GetLogObjectFluentBuilder { + /// Creates a new `GetLogObjectFluentBuilder`. + pub(crate) fn new(handle: ::std::sync::Arc) -> Self { + Self { + handle, + inner: ::std::default::Default::default(), + config_override: ::std::option::Option::None, + } + } + /// Access the GetLogObject as a reference. + pub fn as_input(&self) -> &crate::operation::get_log_object::builders::GetLogObjectInputBuilder { + &self.inner + } + /// Sends the request and returns the response. + /// + /// If an error occurs, an `SdkError` will be returned with additional details that + /// can be matched against. + /// + /// By default, any retryable failures will be retried twice. Retry behavior + /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be + /// set when configuring the client. + pub async fn send( + self, + ) -> ::std::result::Result< + crate::operation::get_log_object::GetLogObjectOutput, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_log_object::GetLogObjectError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let input = self + .inner + .build() + .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?; + let runtime_plugins = crate::operation::get_log_object::GetLogObject::operation_runtime_plugins( + self.handle.runtime_plugins.clone(), + &self.handle.conf, + self.config_override, + ); + let mut output = crate::operation::get_log_object::GetLogObject::orchestrate(&runtime_plugins, input).await?; + + // Converts any error encountered beyond this point into an `SdkError` response error + // with an `HttpResponse`. However, since we have already exited the `orchestrate` + // function, the original `HttpResponse` is no longer available and cannot be restored. + // This means that header information from the original response has been lost. + // + // Note that the response body would have been consumed by the deserializer + // regardless, even if the initial message was hypothetically processed during + // the orchestrator's deserialization phase but later resulted in an error. 
+ fn response_error( + err: impl ::std::convert::Into<::aws_smithy_runtime_api::box_error::BoxError>, + ) -> ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_log_object::GetLogObjectError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + > { + ::aws_smithy_runtime_api::client::result::SdkError::response_error( + err, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse::new( + ::aws_smithy_runtime_api::http::StatusCode::try_from(200).expect("valid successful code"), + ::aws_smithy_types::body::SdkBody::empty(), + ), + ) + } + + let message = output.field_stream.try_recv_initial_response().await.map_err(response_error)?; + + match message { + ::std::option::Option::Some(_message) => ::std::result::Result::Ok(output), + ::std::option::Option::None => ::std::result::Result::Ok(output), + } + } + + /// Consumes this builder, creating a customizable operation that can be modified before being sent. + pub fn customize( + self, + ) -> crate::client::customize::CustomizableOperation< + crate::operation::get_log_object::GetLogObjectOutput, + crate::operation::get_log_object::GetLogObjectError, + Self, + > { + crate::client::customize::CustomizableOperation::new(self) + } + pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into) -> Self { + self.set_config_override(::std::option::Option::Some(config_override.into())); + self + } + + pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option) -> &mut Self { + self.config_override = config_override; + self + } + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub fn unmask(mut self, input: bool) -> Self { + self.inner = self.inner.unmask(input); + self + } + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub fn set_unmask(mut self, input: ::std::option::Option) -> Self { + self.inner = self.inner.set_unmask(input); + self + } + ///

A boolean flag that indicates whether to unmask sensitive log data. When set to true, any masked or redacted data in the log object will be displayed in its original form. Default is false.

+ pub fn get_unmask(&self) -> &::std::option::Option { + self.inner.get_unmask() + } + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ pub fn log_object_pointer(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.inner = self.inner.log_object_pointer(input.into()); + self + } + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ pub fn set_log_object_pointer(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.inner = self.inner.set_log_object_pointer(input); + self + } + ///

A pointer to the specific log object to retrieve. This is a required parameter that uniquely identifies the log object within CloudWatch Logs. The pointer is typically obtained from a previous query or filter operation.

+ pub fn get_log_object_pointer(&self) -> &::std::option::Option<::std::string::String> { + self.inner.get_log_object_pointer() + } +} diff --git a/sdk/cloudwatchlogs/src/operation/put_account_policy/_put_account_policy_input.rs b/sdk/cloudwatchlogs/src/operation/put_account_policy/_put_account_policy_input.rs index e83381599eb6..a1cccfbb05b3 100644 --- a/sdk/cloudwatchlogs/src/operation/put_account_policy/_put_account_policy_input.rs +++ b/sdk/cloudwatchlogs/src/operation/put_account_policy/_put_account_policy_input.rs @@ -60,7 +60,7 @@ pub struct PutAccountPolicyInput { ///

Currently the only valid value for this parameter is ALL, which specifies that the data protection policy applies to all log groups in the account. If you omit this parameter, the default of ALL is used.

pub scope: ::std::option::Option, ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

@@ -133,7 +133,7 @@ impl PutAccountPolicyInput { self.scope.as_ref() } ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

@@ -364,7 +364,7 @@ impl PutAccountPolicyInputBuilder { &self.scope } ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

@@ -374,7 +374,7 @@ impl PutAccountPolicyInputBuilder { self } ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

@@ -384,7 +384,7 @@ impl PutAccountPolicyInputBuilder { self } ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

diff --git a/sdk/cloudwatchlogs/src/operation/put_account_policy/builders.rs b/sdk/cloudwatchlogs/src/operation/put_account_policy/builders.rs index 02650678368c..34341562b10d 100644 --- a/sdk/cloudwatchlogs/src/operation/put_account_policy/builders.rs +++ b/sdk/cloudwatchlogs/src/operation/put_account_policy/builders.rs @@ -22,7 +22,7 @@ impl crate::operation::put_account_policy::builders::PutAccountPolicyInputBuilde } /// Fluent builder constructing a request to `PutAccountPolicy`. /// -///

Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.

+///

Creates an account-level data protection policy, subscription filter policy, field index policy, transformer policy, or metric extraction policy that applies to all log groups or a subset of log groups in the account.

///

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating.

///
    ///
  • @@ -33,6 +33,8 @@ impl crate::operation::put_account_policy::builders::PutAccountPolicyInputBuilde ///

    To create a transformer policy, you must have the logs:PutTransformer and logs:PutAccountPolicy permissions.

  • ///
  • ///

    To create a field index policy, you must have the logs:PutIndexPolicy and logs:PutAccountPolicy permissions.

  • +///
  • +///

    To create a metric extraction policy, you must have the logs:PutMetricExtractionPolicy and logs:PutAccountPolicy permissions.

  • ///
///

Data protection policy

///

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

@@ -73,6 +75,24 @@ impl crate::operation::put_account_policy::builders::PutAccountPolicyInputBuilde ///

You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging.

///

If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.

///

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.

+///

Metric extraction policy

+///

A metric extraction policy controls whether CloudWatch Metrics can be created through the Embedded Metrics Format (EMF) for log groups in your account. By default, EMF metric creation is enabled for all log groups. You can use metric extraction policies to disable EMF metric creation for your entire account or specific log groups.

+///

When a policy disables EMF metric creation for a log group, log events in the EMF format are still ingested, but no CloudWatch Metrics are created from them.

+///

Creating a policy disables metrics for AWS features that use EMF to create metrics, such as CloudWatch Container Insights and CloudWatch Application Signals. To prevent turning off those features by accident, we recommend that you exclude the underlying log groups through selection criteria such as LogGroupNamePrefix NOT IN \["/aws/containerinsights", "/aws/ecs/containerinsights", "/aws/application-signals/data"\].

+///
+///

Each account can have either one account-level metric extraction policy that applies to all log groups, or up to 5 policies that are each scoped to a subset of log groups with the selectionCriteria parameter. The selection criteria supports filtering by LogGroupName and LogGroupNamePrefix using the operators IN and NOT IN. You can specify up to 50 values in each IN or NOT IN list.

+///

The selection criteria can be specified in these formats:

+///

LogGroupName IN \["log-group-1", "log-group-2"\]

+///

LogGroupNamePrefix NOT IN \["/aws/prefix1", "/aws/prefix2"\]

+///

If you have multiple account-level metric extraction policies with selection criteria, no two of them can have overlapping criteria. For example, if you have one policy with selection criteria LogGroupNamePrefix IN \["my-log"\], you can't have another metric extraction policy with selection criteria LogGroupNamePrefix IN \["/my-log-prod"\] or LogGroupNamePrefix IN \["/my-logging"\], as the set of log groups matching these prefixes would be a subset of the log groups matching the first policy's prefix, creating an overlap.

+///

When using NOT IN, only one policy with this operator is allowed per account.

+///

When combining policies with IN and NOT IN operators, the overlap check ensures that policies don't have conflicting effects. Two policies with IN and NOT IN operators do not overlap if and only if every value in the IN policy is completely contained within some value in the NOT IN policy. For example:

+///
    +///
  • +///

    If you have a NOT IN policy for prefix "/aws/lambda", you can create an IN policy for the exact log group name "/aws/lambda/function1" because the set of log groups matching "/aws/lambda/function1" is a subset of the log groups matching "/aws/lambda".

  • +///
  • +///

    If you have a NOT IN policy for prefix "/aws/lambda", you cannot create an IN policy for prefix "/aws" because the set of log groups matching "/aws" is not a subset of the log groups matching "/aws/lambda".

  • +///
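To ground the policy description above, here is a hypothetical sketch of creating an account-level metric extraction policy scoped with selection criteria. The `PolicyType::MetricExtractionPolicy` variant name is assumed from the SDK's usual SCREAMING_SNAKE_CASE-to-PascalCase mapping of METRIC_EXTRACTION_POLICY, and the policy document body is a placeholder rather than the real schema:

```rust
use aws_sdk_cloudwatchlogs::types::PolicyType;
use aws_sdk_cloudwatchlogs::{Client, Error};

async fn disable_emf_metric_extraction(client: &Client) -> Result<(), Error> {
    client
        .put_account_policy()
        .policy_name("disable-emf-metric-extraction")
        // Assumed variant name for the new METRIC_EXTRACTION_POLICY type.
        .policy_type(PolicyType::MetricExtractionPolicy)
        // Placeholder document; see the CloudWatch Logs docs for the schema.
        .policy_document("{}")
        // Exclude Container Insights / Application Signals log groups so
        // their EMF-derived metrics stay enabled.
        .selection_criteria(
            r#"LogGroupNamePrefix NOT IN ["/aws/containerinsights", "/aws/ecs/containerinsights", "/aws/application-signals/data"]"#,
        )
        .send()
        .await?;
    Ok(())
}
```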
#[derive(::std::clone::Clone, ::std::fmt::Debug)] pub struct PutAccountPolicyFluentBuilder { handle: ::std::sync::Arc, @@ -359,7 +379,7 @@ impl PutAccountPolicyFluentBuilder { self.inner.get_scope() } ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

@@ -369,7 +389,7 @@ impl PutAccountPolicyFluentBuilder { self } ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

@@ -379,7 +399,7 @@ impl PutAccountPolicyFluentBuilder { self } ///

Use this parameter to apply the new policy to a subset of log groups in the account.

- ///

Specifing selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICYfor policyType.

+ ///

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType.

///

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN \[\]

///

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

///

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

diff --git a/sdk/cloudwatchlogs/src/operation/untag_log_group/builders.rs b/sdk/cloudwatchlogs/src/operation/untag_log_group/builders.rs index 89bbdd620169..4fc35de50135 100644 --- a/sdk/cloudwatchlogs/src/operation/untag_log_group/builders.rs +++ b/sdk/cloudwatchlogs/src/operation/untag_log_group/builders.rs @@ -27,7 +27,7 @@ impl crate::operation::untag_log_group::builders::UntagLogGroupInputBuilder { ///
///

Removes the specified tags from the specified log group.

///

To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource.

-///

CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys.

+///

When using IAM policies to control tag management for CloudWatch Logs log groups, the condition keys aws:Resource/key-name and aws:TagKeys cannot be used to restrict which tags users can assign.
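Since UntagLogGroup is deprecated in favor of the generic tagging APIs, a hedged sketch of removing a tag with UntagResource follows; the log group ARN and tag key are placeholder values.

```rust
use aws_sdk_cloudwatchlogs as cloudwatchlogs;

// Placeholder ARN and tag key, shown only to illustrate the generic tagging call.
async fn remove_team_tag(client: &cloudwatchlogs::Client) -> Result<(), cloudwatchlogs::Error> {
    client
        .untag_resource()
        .resource_arn("arn:aws:logs:us-east-1:111122223333:log-group:my-log-group")
        .tag_keys("team")
        .send()
        .await?;
    Ok(())
}
```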

#[deprecated(note = "Please use the generic tagging API UntagResource")] #[derive(::std::clone::Clone, ::std::fmt::Debug)] pub struct UntagLogGroupFluentBuilder { diff --git a/sdk/cloudwatchlogs/src/primitives.rs b/sdk/cloudwatchlogs/src/primitives.rs index 391aa9d59c9d..ec90f8121d26 100644 --- a/sdk/cloudwatchlogs/src/primitives.rs +++ b/sdk/cloudwatchlogs/src/primitives.rs @@ -1,4 +1,5 @@ // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub use ::aws_smithy_types::Blob; /// Event stream related primitives such as `Message` or `Header`. pub mod event_stream; diff --git a/sdk/cloudwatchlogs/src/protocol_serde.rs b/sdk/cloudwatchlogs/src/protocol_serde.rs index d5d90ee01b60..a0c5840173e6 100644 --- a/sdk/cloudwatchlogs/src/protocol_serde.rs +++ b/sdk/cloudwatchlogs/src/protocol_serde.rs @@ -127,6 +127,8 @@ pub(crate) mod shape_get_log_events; pub(crate) mod shape_get_log_group_fields; +pub(crate) mod shape_get_log_object; + pub(crate) mod shape_get_log_record; pub(crate) mod shape_get_query_results; @@ -321,12 +323,18 @@ pub(crate) mod shape_get_log_events_input; pub(crate) mod shape_get_log_group_fields_input; +pub(crate) mod shape_get_log_object_input; + +pub(crate) mod shape_get_log_object_output; + pub(crate) mod shape_get_log_record_input; pub(crate) mod shape_get_query_results_input; pub(crate) mod shape_get_transformer_input; +pub(crate) mod shape_internal_streaming_exception; + pub(crate) mod shape_invalid_operation_exception; pub(crate) mod shape_invalid_parameter_exception; @@ -563,6 +571,8 @@ pub(crate) mod shape_export_task; pub(crate) mod shape_field_index; +pub(crate) mod shape_fields_data; + pub(crate) mod shape_filtered_log_event; pub(crate) mod shape_grok; diff --git a/sdk/cloudwatchlogs/src/protocol_serde/shape_fields_data.rs b/sdk/cloudwatchlogs/src/protocol_serde/shape_fields_data.rs new file mode 100644 index 000000000000..9c060f1208f1 --- /dev/null +++ b/sdk/cloudwatchlogs/src/protocol_serde/shape_fields_data.rs @@ -0,0 +1,51 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub(crate) fn de_fields_data_payload( + input: &[u8], +) -> ::std::result::Result { + let mut tokens_owned = ::aws_smithy_json::deserialize::json_token_iter(crate::protocol_serde::or_empty_doc(input)).peekable(); + let tokens = &mut tokens_owned; + let result = crate::protocol_serde::shape_fields_data::de_fields_data(tokens)? + .ok_or_else(|| ::aws_smithy_json::deserialize::error::DeserializeError::custom("expected payload member value")); + if tokens.next().is_some() { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "found more JSON tokens after completing parsing", + )); + } + result +} + +pub(crate) fn de_fields_data<'a, I>( + tokens: &mut ::std::iter::Peekable, +) -> ::std::result::Result, ::aws_smithy_json::deserialize::error::DeserializeError> +where + I: Iterator, ::aws_smithy_json::deserialize::error::DeserializeError>>, +{ + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), + Some(::aws_smithy_json::deserialize::Token::StartObject { .. }) => { + #[allow(unused_mut)] + let mut builder = crate::types::builders::FieldsDataBuilder::default(); + loop { + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::EndObject { .. }) => break, + Some(::aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => match key.to_unescaped()?.as_ref() { + "data" => { + builder = builder.set_data(::aws_smithy_json::deserialize::token::expect_blob_or_null(tokens.next())?); + } + _ => ::aws_smithy_json::deserialize::token::skip_value(tokens)?, + }, + other => { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom(format!( + "expected object key or end object, found: {:?}", + other + ))) + } + } + } + Ok(Some(builder.build())) + } + _ => Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "expected start object or null", + )), + } +} diff --git a/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object.rs b/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object.rs new file mode 100644 index 000000000000..55397b4ebb6b --- /dev/null +++ b/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object.rs @@ -0,0 +1,143 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +#[allow(clippy::unnecessary_wraps)] +pub fn de_get_log_object_http_response( + response: &mut ::aws_smithy_runtime_api::http::Response, +) -> std::result::Result { + let mut _response_body = ::aws_smithy_types::body::SdkBody::taken(); + std::mem::swap(&mut _response_body, response.body_mut()); + let _response_body = &mut _response_body; + + let _response_status = response.status().as_u16(); + let _response_headers = response.headers(); + Ok({ + #[allow(unused_mut)] + let mut output = crate::operation::get_log_object::builders::GetLogObjectOutputBuilder::default(); + output = output.set_field_stream(Some(crate::protocol_serde::shape_get_log_object_output::de_field_stream_payload( + _response_body, + )?)); + output._set_request_id(::aws_types::request_id::RequestId::request_id(_response_headers).map(str::to_string)); + output.build().map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)? 
+ }) +} + +#[allow(clippy::unnecessary_wraps)] +pub fn de_get_log_object_http_error( + _response_status: u16, + _response_headers: &::aws_smithy_runtime_api::http::Headers, + _response_body: &[u8], +) -> std::result::Result { + #[allow(unused_mut)] + let mut generic_builder = crate::protocol_serde::parse_http_error_metadata(_response_status, _response_headers, _response_body) + .map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)?; + generic_builder = ::aws_types::request_id::apply_request_id(generic_builder, _response_headers); + let generic = generic_builder.build(); + let error_code = match generic.code() { + Some(code) => code, + None => return Err(crate::operation::get_log_object::GetLogObjectError::unhandled(generic)), + }; + + let _error_message = generic.message().map(|msg| msg.to_owned()); + Err(match error_code { + "AccessDeniedException" => crate::operation::get_log_object::GetLogObjectError::AccessDeniedException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::AccessDeniedExceptionBuilder::default(); + output = crate::protocol_serde::shape_access_denied_exception::de_access_denied_exception_json_err(_response_body, output) + .map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + "InvalidOperationException" => crate::operation::get_log_object::GetLogObjectError::InvalidOperationException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::InvalidOperationExceptionBuilder::default(); + output = crate::protocol_serde::shape_invalid_operation_exception::de_invalid_operation_exception_json_err(_response_body, output) + .map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + "InvalidParameterException" => crate::operation::get_log_object::GetLogObjectError::InvalidParameterException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::InvalidParameterExceptionBuilder::default(); + output = crate::protocol_serde::shape_invalid_parameter_exception::de_invalid_parameter_exception_json_err(_response_body, output) + .map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + "LimitExceededException" => crate::operation::get_log_object::GetLogObjectError::LimitExceededException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::LimitExceededExceptionBuilder::default(); + output = crate::protocol_serde::shape_limit_exceeded_exception::de_limit_exceeded_exception_json_err(_response_body, output) + .map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + "ResourceNotFoundException" => crate::operation::get_log_object::GetLogObjectError::ResourceNotFoundException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = 
crate::types::error::builders::ResourceNotFoundExceptionBuilder::default(); + output = crate::protocol_serde::shape_resource_not_found_exception::de_resource_not_found_exception_json_err(_response_body, output) + .map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + "InternalStreamingException" => crate::operation::get_log_object::GetLogObjectError::InternalStreamingException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::InternalStreamingExceptionBuilder::default(); + output = crate::protocol_serde::shape_internal_streaming_exception::de_internal_streaming_exception_json_err(_response_body, output) + .map_err(crate::operation::get_log_object::GetLogObjectError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + _ => crate::operation::get_log_object::GetLogObjectError::generic(generic), + }) +} + +pub fn ser_get_log_object_input( + input: &crate::operation::get_log_object::GetLogObjectInput, +) -> ::std::result::Result<::aws_smithy_types::body::SdkBody, ::aws_smithy_types::error::operation::SerializationError> { + let mut out = String::new(); + let mut object = ::aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); + crate::protocol_serde::shape_get_log_object_input::ser_get_log_object_input_input(&mut object, input)?; + object.finish(); + Ok(::aws_smithy_types::body::SdkBody::from(out)) +} diff --git a/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object_input.rs b/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object_input.rs new file mode 100644 index 000000000000..9258105593ec --- /dev/null +++ b/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object_input.rs @@ -0,0 +1,13 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub fn ser_get_log_object_input_input( + object: &mut ::aws_smithy_json::serialize::JsonObjectWriter, + input: &crate::operation::get_log_object::GetLogObjectInput, +) -> ::std::result::Result<(), ::aws_smithy_types::error::operation::SerializationError> { + if let Some(var_1) = &input.unmask { + object.key("unmask").boolean(*var_1); + } + if let Some(var_2) = &input.log_object_pointer { + object.key("logObjectPointer").string(var_2.as_str()); + } + Ok(()) +} diff --git a/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object_output.rs b/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object_output.rs new file mode 100644 index 000000000000..3b538d97581b --- /dev/null +++ b/sdk/cloudwatchlogs/src/protocol_serde/shape_get_log_object_output.rs @@ -0,0 +1,14 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. 
+pub fn de_field_stream_payload( + body: &mut ::aws_smithy_types::body::SdkBody, +) -> std::result::Result< + crate::event_receiver::EventReceiver, + crate::operation::get_log_object::GetLogObjectError, +> { + let unmarshaller = crate::event_stream_serde::GetLogObjectResponseStreamUnmarshaller::new(); + let body = std::mem::replace(body, ::aws_smithy_types::body::SdkBody::taken()); + Ok(crate::event_receiver::EventReceiver::new(::aws_smithy_http::event_stream::Receiver::new( + unmarshaller, + body, + ))) +} diff --git a/sdk/cloudwatchlogs/src/protocol_serde/shape_internal_streaming_exception.rs b/sdk/cloudwatchlogs/src/protocol_serde/shape_internal_streaming_exception.rs new file mode 100644 index 000000000000..8afd39450518 --- /dev/null +++ b/sdk/cloudwatchlogs/src/protocol_serde/shape_internal_streaming_exception.rs @@ -0,0 +1,37 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub(crate) fn de_internal_streaming_exception_json_err( + value: &[u8], + mut builder: crate::types::error::builders::InternalStreamingExceptionBuilder, +) -> ::std::result::Result +{ + let mut tokens_owned = ::aws_smithy_json::deserialize::json_token_iter(crate::protocol_serde::or_empty_doc(value)).peekable(); + let tokens = &mut tokens_owned; + ::aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; + loop { + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::EndObject { .. }) => break, + Some(::aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => match key.to_unescaped()?.as_ref() { + "message" => { + builder = builder.set_message( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? + .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?, + ); + } + _ => ::aws_smithy_json::deserialize::token::skip_value(tokens)?, + }, + other => { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom(format!( + "expected object key or end object, found: {:?}", + other + ))) + } + } + } + if tokens.next().is_some() { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "found more JSON tokens after completing parsing", + )); + } + Ok(builder) +} diff --git a/sdk/cloudwatchlogs/src/types.rs b/sdk/cloudwatchlogs/src/types.rs index 91d9a4bb0acd..595fdac048c5 100644 --- a/sdk/cloudwatchlogs/src/types.rs +++ b/sdk/cloudwatchlogs/src/types.rs @@ -177,6 +177,10 @@ pub use crate::types::_query_statistics::QueryStatistics; pub use crate::types::_result_field::ResultField; +pub use crate::types::_get_log_object_response_stream::GetLogObjectResponseStream; + +pub use crate::types::_fields_data::FieldsData; + pub use crate::types::_log_group_field::LogGroupField; pub use crate::types::_output_log_event::OutputLogEvent; @@ -305,10 +309,14 @@ mod _export_task_status_code; mod _field_index; +mod _fields_data; + mod _filtered_log_event; mod _flattened_element; +mod _get_log_object_response_stream; + mod _grok; mod _index_policy; diff --git a/sdk/cloudwatchlogs/src/types/_fields_data.rs b/sdk/cloudwatchlogs/src/types/_fields_data.rs new file mode 100644 index 000000000000..7abab0d310b6 --- /dev/null +++ b/sdk/cloudwatchlogs/src/types/_fields_data.rs @@ -0,0 +1,48 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +///

A structure containing the extracted fields from a log event. These fields are extracted based on the log format and can be used for structured querying and analysis.

+#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub struct FieldsData { + ///

The actual log data content returned in the streaming response. This contains the fields and values of the log event in a structured format that can be parsed and processed by the client.

+ pub data: ::std::option::Option<::aws_smithy_types::Blob>, +} +impl FieldsData { + ///

The actual log data content returned in the streaming response. This contains the fields and values of the log event in a structured format that can be parsed and processed by the client.

+ pub fn data(&self) -> ::std::option::Option<&::aws_smithy_types::Blob> { + self.data.as_ref() + } +} +impl FieldsData { + /// Creates a new builder-style object to manufacture [`FieldsData`](crate::types::FieldsData). + pub fn builder() -> crate::types::builders::FieldsDataBuilder { + crate::types::builders::FieldsDataBuilder::default() + } +} + +/// A builder for [`FieldsData`](crate::types::FieldsData). +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct FieldsDataBuilder { + pub(crate) data: ::std::option::Option<::aws_smithy_types::Blob>, +} +impl FieldsDataBuilder { + ///

The actual log data content returned in the streaming response. This contains the fields and values of the log event in a structured format that can be parsed and processed by the client.

+ pub fn data(mut self, input: ::aws_smithy_types::Blob) -> Self { + self.data = ::std::option::Option::Some(input); + self + } + ///

The actual log data content returned in the streaming response. This contains the fields and values of the log event in a structured format that can be parsed and processed by the client.

+ pub fn set_data(mut self, input: ::std::option::Option<::aws_smithy_types::Blob>) -> Self { + self.data = input; + self + } + ///

The actual log data content returned in the streaming response. This contains the fields and values of the log event in a structured format that can be parsed and processed by the client.

+ pub fn get_data(&self) -> &::std::option::Option<::aws_smithy_types::Blob> { + &self.data + } + /// Consumes the builder and constructs a [`FieldsData`](crate::types::FieldsData). + pub fn build(self) -> crate::types::FieldsData { + crate::types::FieldsData { data: self.data } + } +} diff --git a/sdk/cloudwatchlogs/src/types/_get_log_object_response_stream.rs b/sdk/cloudwatchlogs/src/types/_get_log_object_response_stream.rs new file mode 100644 index 000000000000..762742d54461 --- /dev/null +++ b/sdk/cloudwatchlogs/src/types/_get_log_object_response_stream.rs @@ -0,0 +1,38 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +///

A stream of structured log data returned by the GetLogObject operation. This stream contains log events with their associated metadata and extracted fields.
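A minimal consumption sketch, assuming the GetLogObject output exposes the stream as field_stream (as the deserializer above sets it) and that the event receiver yields events via recv(); the log object pointer is a placeholder obtained from another CloudWatch Logs response.

```rust
use aws_sdk_cloudwatchlogs as cloudwatchlogs;
use cloudwatchlogs::types::GetLogObjectResponseStream;

// Placeholder pointer; a real value comes from another CloudWatch Logs API response.
async fn read_log_object(
    client: &cloudwatchlogs::Client,
    log_object_pointer: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let mut output = client
        .get_log_object()
        .log_object_pointer(log_object_pointer)
        .unmask(true)
        .send()
        .await?;

    // Drain the event stream; each Fields event carries a FieldsData payload.
    while let Some(event) = output.field_stream.recv().await? {
        match event {
            GetLogObjectResponseStream::Fields(fields) => {
                if let Some(data) = fields.data() {
                    println!("received {} bytes of field data", data.as_ref().len());
                }
            }
            // Unknown variants may appear when the service adds new event types.
            _ => {}
        }
    }
    Ok(())
}
```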

+#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub enum GetLogObjectResponseStream { + ///

A structure containing the extracted fields from a log event. These fields are extracted based on the log format and can be used for structured querying and analysis.

+ Fields(crate::types::FieldsData), + /// The `Unknown` variant represents cases where new union variant was received. Consider upgrading the SDK to the latest available version. + /// An unknown enum variant + /// + /// _Note: If you encounter this error, consider upgrading your SDK to the latest version._ + /// The `Unknown` variant represents cases where the server sent a value that wasn't recognized + /// by the client. This can happen when the server adds new functionality, but the client has not been updated. + /// To investigate this, consider turning on debug logging to print the raw HTTP response. + #[non_exhaustive] + Unknown, +} +impl GetLogObjectResponseStream { + #[allow(irrefutable_let_patterns)] + /// Tries to convert the enum instance into [`Fields`](crate::types::GetLogObjectResponseStream::Fields), extracting the inner [`FieldsData`](crate::types::FieldsData). + /// Returns `Err(&Self)` if it can't be converted. + pub fn as_fields(&self) -> ::std::result::Result<&crate::types::FieldsData, &Self> { + if let GetLogObjectResponseStream::Fields(val) = &self { + ::std::result::Result::Ok(val) + } else { + ::std::result::Result::Err(self) + } + } + /// Returns true if this is a [`Fields`](crate::types::GetLogObjectResponseStream::Fields). + pub fn is_fields(&self) -> bool { + self.as_fields().is_ok() + } + /// Returns true if the enum instance is the `Unknown` variant. + pub fn is_unknown(&self) -> bool { + matches!(self, Self::Unknown) + } +} diff --git a/sdk/cloudwatchlogs/src/types/_policy_type.rs b/sdk/cloudwatchlogs/src/types/_policy_type.rs index 9c11337bc812..259956bbedaf 100644 --- a/sdk/cloudwatchlogs/src/types/_policy_type.rs +++ b/sdk/cloudwatchlogs/src/types/_policy_type.rs @@ -14,6 +14,7 @@ /// match policytype { /// PolicyType::DataProtectionPolicy => { /* ... */ }, /// PolicyType::FieldIndexPolicy => { /* ... */ }, +/// PolicyType::MetricExtractionPolicy => { /* ... */ }, /// PolicyType::SubscriptionFilterPolicy => { /* ... */ }, /// PolicyType::TransformerPolicy => { /* ... 
*/ }, /// other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ }, @@ -49,6 +50,8 @@ pub enum PolicyType { #[allow(missing_docs)] // documentation missing in model FieldIndexPolicy, #[allow(missing_docs)] // documentation missing in model + MetricExtractionPolicy, + #[allow(missing_docs)] // documentation missing in model SubscriptionFilterPolicy, #[allow(missing_docs)] // documentation missing in model TransformerPolicy, @@ -61,6 +64,7 @@ impl ::std::convert::From<&str> for PolicyType { match s { "DATA_PROTECTION_POLICY" => PolicyType::DataProtectionPolicy, "FIELD_INDEX_POLICY" => PolicyType::FieldIndexPolicy, + "METRIC_EXTRACTION_POLICY" => PolicyType::MetricExtractionPolicy, "SUBSCRIPTION_FILTER_POLICY" => PolicyType::SubscriptionFilterPolicy, "TRANSFORMER_POLICY" => PolicyType::TransformerPolicy, other => PolicyType::Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue(other.to_owned())), @@ -80,6 +84,7 @@ impl PolicyType { match self { PolicyType::DataProtectionPolicy => "DATA_PROTECTION_POLICY", PolicyType::FieldIndexPolicy => "FIELD_INDEX_POLICY", + PolicyType::MetricExtractionPolicy => "METRIC_EXTRACTION_POLICY", PolicyType::SubscriptionFilterPolicy => "SUBSCRIPTION_FILTER_POLICY", PolicyType::TransformerPolicy => "TRANSFORMER_POLICY", PolicyType::Unknown(value) => value.as_str(), @@ -90,6 +95,7 @@ impl PolicyType { &[ "DATA_PROTECTION_POLICY", "FIELD_INDEX_POLICY", + "METRIC_EXTRACTION_POLICY", "SUBSCRIPTION_FILTER_POLICY", "TRANSFORMER_POLICY", ] @@ -117,6 +123,7 @@ impl ::std::fmt::Display for PolicyType { match self { PolicyType::DataProtectionPolicy => write!(f, "DATA_PROTECTION_POLICY"), PolicyType::FieldIndexPolicy => write!(f, "FIELD_INDEX_POLICY"), + PolicyType::MetricExtractionPolicy => write!(f, "METRIC_EXTRACTION_POLICY"), PolicyType::SubscriptionFilterPolicy => write!(f, "SUBSCRIPTION_FILTER_POLICY"), PolicyType::TransformerPolicy => write!(f, "TRANSFORMER_POLICY"), PolicyType::Unknown(value) => write!(f, "{}", value), diff --git a/sdk/cloudwatchlogs/src/types/builders.rs b/sdk/cloudwatchlogs/src/types/builders.rs index 9786c7393673..2d39b7541fcc 100644 --- a/sdk/cloudwatchlogs/src/types/builders.rs +++ b/sdk/cloudwatchlogs/src/types/builders.rs @@ -125,6 +125,8 @@ pub use crate::types::_query_statistics::QueryStatisticsBuilder; pub use crate::types::_result_field::ResultFieldBuilder; +pub use crate::types::_fields_data::FieldsDataBuilder; + pub use crate::types::_log_group_field::LogGroupFieldBuilder; pub use crate::types::_output_log_event::OutputLogEventBuilder; diff --git a/sdk/cloudwatchlogs/src/types/error.rs b/sdk/cloudwatchlogs/src/types/error.rs index e10bd1ef21e0..510c5d50a6b6 100644 --- a/sdk/cloudwatchlogs/src/types/error.rs +++ b/sdk/cloudwatchlogs/src/types/error.rs @@ -148,6 +148,111 @@ pub use crate::types::error::_data_already_accepted_exception::DataAlreadyAccept pub use crate::types::error::_service_quota_exceeded_exception::ServiceQuotaExceededException; +pub use crate::types::error::_internal_streaming_exception::InternalStreamingException; + +/// Error type for the `GetLogObjectResponseStreamError` operation. +#[non_exhaustive] +#[derive(::std::fmt::Debug)] +pub enum GetLogObjectResponseStreamError { + ///

An internal error occurred during the streaming of log data. This exception is thrown when there's an issue with the internal streaming mechanism used by the GetLogObject operation.

+ InternalStreamingException(crate::types::error::InternalStreamingException), + /// An unexpected error occurred (e.g., invalid JSON returned by the service or an unknown error code). + #[deprecated(note = "Matching `Unhandled` directly is not forwards compatible. Instead, match using a \ + variable wildcard pattern and check `.code()`: + \ +    `err if err.code() == Some(\"SpecificExceptionCode\") => { /* handle the error */ }` + \ + See [`ProvideErrorMetadata`](#impl-ProvideErrorMetadata-for-GetLogObjectResponseStreamError) for what information is available for the error.")] + Unhandled(crate::error::sealed_unhandled::Unhandled), +} +impl GetLogObjectResponseStreamError { + /// Creates the `GetLogObjectResponseStreamError::Unhandled` variant from any error type. + pub fn unhandled( + err: impl ::std::convert::Into<::std::boxed::Box>, + ) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source: err.into(), + meta: ::std::default::Default::default(), + }) + } + + /// Creates the `GetLogObjectResponseStreamError::Unhandled` variant from an [`ErrorMetadata`](::aws_smithy_types::error::ErrorMetadata). + pub fn generic(err: ::aws_smithy_types::error::ErrorMetadata) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source: err.clone().into(), + meta: err, + }) + } + /// + /// Returns error metadata, which includes the error code, message, + /// request ID, and potentially additional information. + /// + pub fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { + match self { + Self::InternalStreamingException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::Unhandled(e) => &e.meta, + } + } + /// Returns `true` if the error kind is `GetLogObjectResponseStreamError::InternalStreamingException`. 
+ pub fn is_internal_streaming_exception(&self) -> bool { + matches!(self, Self::InternalStreamingException(_)) + } +} +impl ::std::error::Error for GetLogObjectResponseStreamError { + fn source(&self) -> ::std::option::Option<&(dyn ::std::error::Error + 'static)> { + match self { + Self::InternalStreamingException(_inner) => ::std::option::Option::Some(_inner), + Self::Unhandled(_inner) => ::std::option::Option::Some(&*_inner.source), + } + } +} +impl ::std::fmt::Display for GetLogObjectResponseStreamError { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match self { + Self::InternalStreamingException(_inner) => _inner.fmt(f), + Self::Unhandled(_inner) => { + if let ::std::option::Option::Some(code) = ::aws_smithy_types::error::metadata::ProvideErrorMetadata::code(self) { + write!(f, "unhandled error ({code})") + } else { + f.write_str("unhandled error") + } + } + } + } +} +impl ::aws_smithy_types::retry::ProvideErrorKind for GetLogObjectResponseStreamError { + fn code(&self) -> ::std::option::Option<&str> { + ::aws_smithy_types::error::metadata::ProvideErrorMetadata::code(self) + } + fn retryable_error_kind(&self) -> ::std::option::Option<::aws_smithy_types::retry::ErrorKind> { + ::std::option::Option::None + } +} +impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for GetLogObjectResponseStreamError { + fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { + match self { + Self::InternalStreamingException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::Unhandled(_inner) => &_inner.meta, + } + } +} +impl ::aws_smithy_runtime_api::client::result::CreateUnhandledError for GetLogObjectResponseStreamError { + fn create_unhandled_error( + source: ::std::boxed::Box, + meta: ::std::option::Option<::aws_smithy_types::error::ErrorMetadata>, + ) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source, + meta: meta.unwrap_or_default(), + }) + } +} +impl ::aws_types::request_id::RequestId for crate::types::error::GetLogObjectResponseStreamError { + fn request_id(&self) -> Option<&str> { + self.meta().request_id() + } +} + pub use crate::types::error::_resource_already_exists_exception::ResourceAlreadyExistsException; mod _access_denied_exception; @@ -156,6 +261,8 @@ mod _conflict_exception; mod _data_already_accepted_exception; +mod _internal_streaming_exception; + mod _invalid_operation_exception; mod _invalid_parameter_exception; diff --git a/sdk/cloudwatchlogs/src/types/error/_internal_streaming_exception.rs b/sdk/cloudwatchlogs/src/types/error/_internal_streaming_exception.rs new file mode 100644 index 000000000000..f3be4b5b99d2 --- /dev/null +++ b/sdk/cloudwatchlogs/src/types/error/_internal_streaming_exception.rs @@ -0,0 +1,87 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +///

An internal error occurred during the streaming of log data. This exception is thrown when there's an issue with the internal streaming mechanism used by the GetLogObject operation.

+#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub struct InternalStreamingException { + #[allow(missing_docs)] // documentation missing in model + pub message: ::std::option::Option<::std::string::String>, + pub(crate) meta: ::aws_smithy_types::error::ErrorMetadata, +} +impl InternalStreamingException { + /// Returns the error message. + pub fn message(&self) -> ::std::option::Option<&str> { + self.message.as_deref() + } +} +impl ::std::fmt::Display for InternalStreamingException { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + ::std::write!(f, "InternalStreamingException")?; + if let ::std::option::Option::Some(inner_1) = &self.message { + { + ::std::write!(f, ": {}", inner_1)?; + } + } + Ok(()) + } +} +impl ::std::error::Error for InternalStreamingException {} +impl ::aws_types::request_id::RequestId for crate::types::error::InternalStreamingException { + fn request_id(&self) -> Option<&str> { + use ::aws_smithy_types::error::metadata::ProvideErrorMetadata; + self.meta().request_id() + } +} +impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for InternalStreamingException { + fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { + &self.meta + } +} +impl InternalStreamingException { + /// Creates a new builder-style object to manufacture [`InternalStreamingException`](crate::types::error::InternalStreamingException). + pub fn builder() -> crate::types::error::builders::InternalStreamingExceptionBuilder { + crate::types::error::builders::InternalStreamingExceptionBuilder::default() + } +} + +/// A builder for [`InternalStreamingException`](crate::types::error::InternalStreamingException). +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct InternalStreamingExceptionBuilder { + pub(crate) message: ::std::option::Option<::std::string::String>, + meta: std::option::Option<::aws_smithy_types::error::ErrorMetadata>, +} +impl InternalStreamingExceptionBuilder { + #[allow(missing_docs)] // documentation missing in model + pub fn message(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.message = ::std::option::Option::Some(input.into()); + self + } + #[allow(missing_docs)] // documentation missing in model + pub fn set_message(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.message = input; + self + } + #[allow(missing_docs)] // documentation missing in model + pub fn get_message(&self) -> &::std::option::Option<::std::string::String> { + &self.message + } + /// Sets error metadata + pub fn meta(mut self, meta: ::aws_smithy_types::error::ErrorMetadata) -> Self { + self.meta = Some(meta); + self + } + + /// Sets error metadata + pub fn set_meta(&mut self, meta: std::option::Option<::aws_smithy_types::error::ErrorMetadata>) -> &mut Self { + self.meta = meta; + self + } + /// Consumes the builder and constructs a [`InternalStreamingException`](crate::types::error::InternalStreamingException). 
+ pub fn build(self) -> crate::types::error::InternalStreamingException { + crate::types::error::InternalStreamingException { + message: self.message, + meta: self.meta.unwrap_or_default(), + } + } +} diff --git a/sdk/cloudwatchlogs/src/types/error/builders.rs b/sdk/cloudwatchlogs/src/types/error/builders.rs index 55513b347eed..f6638ef4ccdc 100644 --- a/sdk/cloudwatchlogs/src/types/error/builders.rs +++ b/sdk/cloudwatchlogs/src/types/error/builders.rs @@ -35,4 +35,6 @@ pub use crate::types::error::_data_already_accepted_exception::DataAlreadyAccept pub use crate::types::error::_service_quota_exceeded_exception::ServiceQuotaExceededExceptionBuilder; +pub use crate::types::error::_internal_streaming_exception::InternalStreamingExceptionBuilder; + pub use crate::types::error::_resource_already_exists_exception::ResourceAlreadyExistsExceptionBuilder; diff --git a/sdk/mediaconvert/Cargo.toml b/sdk/mediaconvert/Cargo.toml index 671e6d11fc7d..9e957c8423c7 100644 --- a/sdk/mediaconvert/Cargo.toml +++ b/sdk/mediaconvert/Cargo.toml @@ -1,7 +1,7 @@ # Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. [package] name = "aws-sdk-mediaconvert" -version = "1.95.0" +version = "1.96.0" authors = ["AWS Rust SDK Team ", "Russell Cohen "] description = "AWS SDK for AWS Elemental MediaConvert" edition = "2021" diff --git a/sdk/mediaconvert/README.md b/sdk/mediaconvert/README.md index a18c0ae97fc2..7476395c4d58 100644 --- a/sdk/mediaconvert/README.md +++ b/sdk/mediaconvert/README.md @@ -14,7 +14,7 @@ your project, add the following to your **Cargo.toml** file: ```toml [dependencies] aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-mediaconvert = "1.95.0" +aws-sdk-mediaconvert = "1.96.0" tokio = { version = "1", features = ["full"] } ``` diff --git a/sdk/mediaconvert/src/lib.rs b/sdk/mediaconvert/src/lib.rs index f507882c84bb..08220f905343 100644 --- a/sdk/mediaconvert/src/lib.rs +++ b/sdk/mediaconvert/src/lib.rs @@ -32,7 +32,7 @@ //! ```toml //! [dependencies] //! aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -//! aws-sdk-mediaconvert = "1.95.0" +//! aws-sdk-mediaconvert = "1.96.0" //! tokio = { version = "1", features = ["full"] } //! ``` //! 
diff --git a/sdk/mediaconvert/src/protocol_serde.rs b/sdk/mediaconvert/src/protocol_serde.rs index 072b23897253..e22237cfa8d7 100644 --- a/sdk/mediaconvert/src/protocol_serde.rs +++ b/sdk/mediaconvert/src/protocol_serde.rs @@ -267,6 +267,8 @@ pub(crate) mod shape_input_clipping; pub(crate) mod shape_input_decryption_settings; +pub(crate) mod shape_input_tams_settings; + pub(crate) mod shape_input_video_generator; pub(crate) mod shape_list_of_audio_description; diff --git a/sdk/mediaconvert/src/protocol_serde/shape_input.rs b/sdk/mediaconvert/src/protocol_serde/shape_input.rs index 558087dd5204..8c8a23d8cf0a 100644 --- a/sdk/mediaconvert/src/protocol_serde/shape_input.rs +++ b/sdk/mediaconvert/src/protocol_serde/shape_input.rs @@ -142,35 +142,41 @@ pub fn ser_input( } array_46.finish(); } - if let Some(var_48) = &input.timecode_source { - object.key("timecodeSource").string(var_48.as_str()); + if let Some(var_48) = &input.tams_settings { + #[allow(unused_mut)] + let mut object_49 = object.key("tamsSettings").start_object(); + crate::protocol_serde::shape_input_tams_settings::ser_input_tams_settings(&mut object_49, var_48)?; + object_49.finish(); + } + if let Some(var_50) = &input.timecode_source { + object.key("timecodeSource").string(var_50.as_str()); } - if let Some(var_49) = &input.timecode_start { - object.key("timecodeStart").string(var_49.as_str()); + if let Some(var_51) = &input.timecode_start { + object.key("timecodeStart").string(var_51.as_str()); } - if let Some(var_50) = &input.video_generator { + if let Some(var_52) = &input.video_generator { #[allow(unused_mut)] - let mut object_51 = object.key("videoGenerator").start_object(); - crate::protocol_serde::shape_input_video_generator::ser_input_video_generator(&mut object_51, var_50)?; - object_51.finish(); + let mut object_53 = object.key("videoGenerator").start_object(); + crate::protocol_serde::shape_input_video_generator::ser_input_video_generator(&mut object_53, var_52)?; + object_53.finish(); } - if let Some(var_52) = &input.video_overlays { - let mut array_53 = object.key("videoOverlays").start_array(); - for item_54 in var_52 { + if let Some(var_54) = &input.video_overlays { + let mut array_55 = object.key("videoOverlays").start_array(); + for item_56 in var_54 { { #[allow(unused_mut)] - let mut object_55 = array_53.value().start_object(); - crate::protocol_serde::shape_video_overlay::ser_video_overlay(&mut object_55, item_54)?; - object_55.finish(); + let mut object_57 = array_55.value().start_object(); + crate::protocol_serde::shape_video_overlay::ser_video_overlay(&mut object_57, item_56)?; + object_57.finish(); } } - array_53.finish(); + array_55.finish(); } - if let Some(var_56) = &input.video_selector { + if let Some(var_58) = &input.video_selector { #[allow(unused_mut)] - let mut object_57 = object.key("videoSelector").start_object(); - crate::protocol_serde::shape_video_selector::ser_video_selector(&mut object_57, var_56)?; - object_57.finish(); + let mut object_59 = object.key("videoSelector").start_object(); + crate::protocol_serde::shape_video_selector::ser_video_selector(&mut object_59, var_58)?; + object_59.finish(); } Ok(()) } @@ -308,6 +314,9 @@ where )?, ); } + "tamsSettings" => { + builder = builder.set_tams_settings(crate::protocol_serde::shape_input_tams_settings::de_input_tams_settings(tokens)?); + } "timecodeSource" => { builder = builder.set_timecode_source( ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
diff --git a/sdk/mediaconvert/src/protocol_serde/shape_input_tams_settings.rs b/sdk/mediaconvert/src/protocol_serde/shape_input_tams_settings.rs new file mode 100644 index 000000000000..b55ed5e20803 --- /dev/null +++ b/sdk/mediaconvert/src/protocol_serde/shape_input_tams_settings.rs @@ -0,0 +1,80 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub fn ser_input_tams_settings( + object: &mut ::aws_smithy_json::serialize::JsonObjectWriter, + input: &crate::types::InputTamsSettings, +) -> ::std::result::Result<(), ::aws_smithy_types::error::operation::SerializationError> { + if let Some(var_1) = &input.auth_connection_arn { + object.key("authConnectionArn").string(var_1.as_str()); + } + if let Some(var_2) = &input.gap_handling { + object.key("gapHandling").string(var_2.as_str()); + } + if let Some(var_3) = &input.source_id { + object.key("sourceId").string(var_3.as_str()); + } + if let Some(var_4) = &input.timerange { + object.key("timerange").string(var_4.as_str()); + } + Ok(()) +} + +pub(crate) fn de_input_tams_settings<'a, I>( + tokens: &mut ::std::iter::Peekable, +) -> ::std::result::Result, ::aws_smithy_json::deserialize::error::DeserializeError> +where + I: Iterator, ::aws_smithy_json::deserialize::error::DeserializeError>>, +{ + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), + Some(::aws_smithy_json::deserialize::Token::StartObject { .. }) => { + #[allow(unused_mut)] + let mut builder = crate::types::builders::InputTamsSettingsBuilder::default(); + loop { + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::EndObject { .. }) => break, + Some(::aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => match key.to_unescaped()?.as_ref() { + "authConnectionArn" => { + builder = builder.set_auth_connection_arn( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? + .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?, + ); + } + "gapHandling" => { + builder = builder.set_gap_handling( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? + .map(|s| s.to_unescaped().map(|u| crate::types::TamsGapHandling::from(u.as_ref()))) + .transpose()?, + ); + } + "sourceId" => { + builder = builder.set_source_id( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? + .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?, + ); + } + "timerange" => { + builder = builder.set_timerange( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
+ .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?, + ); + } + _ => ::aws_smithy_json::deserialize::token::skip_value(tokens)?, + }, + other => { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom(format!( + "expected object key or end object, found: {:?}", + other + ))) + } + } + } + Ok(Some(builder.build())) + } + _ => Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "expected start object or null", + )), + } +} diff --git a/sdk/mediaconvert/src/types.rs b/sdk/mediaconvert/src/types.rs index 2b8e8bcfe352..71b4103e6dda 100644 --- a/sdk/mediaconvert/src/types.rs +++ b/sdk/mediaconvert/src/types.rs @@ -1215,6 +1215,10 @@ pub use crate::types::_input::Input; pub use crate::types::_input_video_generator::InputVideoGenerator; +pub use crate::types::_input_tams_settings::InputTamsSettings; + +pub use crate::types::_tams_gap_handling::TamsGapHandling; + pub use crate::types::_input_decryption_settings::InputDecryptionSettings; pub use crate::types::_decryption_mode::DecryptionMode; @@ -2013,6 +2017,8 @@ mod _input_sample_range; mod _input_scan_type; +mod _input_tams_settings; + mod _input_template; mod _input_timecode_source; @@ -2375,6 +2381,8 @@ mod _static_key_provider; mod _status_update_interval; +mod _tams_gap_handling; + mod _teletext_destination_settings; mod _teletext_page_type; diff --git a/sdk/mediaconvert/src/types/_input.rs b/sdk/mediaconvert/src/types/_input.rs index c265580dd75c..17621d43a0b9 100644 --- a/sdk/mediaconvert/src/types/_input.rs +++ b/sdk/mediaconvert/src/types/_input.rs @@ -26,7 +26,7 @@ pub struct Input { pub dolby_vision_metadata_xml: ::std::option::Option<::std::string::String>, /// Use Dynamic audio selectors when you do not know the track layout of your source when you submit your job, but want to select multiple audio tracks. When you include an audio track in your output and specify this Dynamic audio selector as the Audio source, MediaConvert creates an output audio track for each dynamically selected track. Note that when you include a Dynamic audio selector for two or more inputs, each input must have the same number of audio tracks and audio channels. pub dynamic_audio_selectors: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::DynamicAudioSelector>>, - /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL. + /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. For standard inputs, provide the path to your S3, HTTP, or HTTPS source file. For example, s3://amzn-s3-demo-bucket/input.mp4 for an Amazon S3 input or https://example.com/input.mp4 for an HTTPS input. For TAMS inputs, specify the HTTPS endpoint of your TAMS server. For example, https://tams-server.example.com . When you do, also specify Source ID, Timerange, GAP handling, and the Authorization connection ARN under TAMS settings. (Don't include these parameters in the Input file URL.) 
For IMF inputs, specify your input by providing the path to your CPL. For example, s3://amzn-s3-demo-bucket/vf/cpl.xml . If the CPL is in an incomplete IMP, make sure to use Supplemental IMPsto specify any supplemental IMPs that contain assets referenced by the CPL. pub file_input: ::std::option::Option<::std::string::String>, /// Specify whether to apply input filtering to improve the video quality of your input. To apply filtering depending on your input type and quality: Choose Auto. To apply no filtering: Choose Disable. To apply filtering regardless of your input type and quality: Choose Force. When you do, you must also specify a value for Filter strength. pub filter_enable: ::std::option::Option, @@ -46,6 +46,8 @@ pub struct Input { pub psi_control: ::std::option::Option, /// Provide a list of any necessary supplemental IMPs. You need supplemental IMPs if the CPL that you're using for your input is in an incomplete IMP. Specify either the supplemental IMP directories with a trailing slash or the ASSETMAP.xml files. For example \["s3://bucket/ov/", "s3://bucket/vf2/ASSETMAP.xml"\]. You don't need to specify the IMP that contains your input CPL, because the service automatically detects it. pub supplemental_imps: ::std::option::Option<::std::vec::Vec<::std::string::String>>, + /// Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it + pub tams_settings: ::std::option::Option, /// Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. pub timecode_source: ::std::option::Option, /// Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. @@ -106,7 +108,7 @@ impl Input { ) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, crate::types::DynamicAudioSelector>> { self.dynamic_audio_selectors.as_ref() } - /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. 
The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL. + /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. For standard inputs, provide the path to your S3, HTTP, or HTTPS source file. For example, s3://amzn-s3-demo-bucket/input.mp4 for an Amazon S3 input or https://example.com/input.mp4 for an HTTPS input. For TAMS inputs, specify the HTTPS endpoint of your TAMS server. For example, https://tams-server.example.com . When you do, also specify Source ID, Timerange, GAP handling, and the Authorization connection ARN under TAMS settings. (Don't include these parameters in the Input file URL.) For IMF inputs, specify your input by providing the path to your CPL. For example, s3://amzn-s3-demo-bucket/vf/cpl.xml . If the CPL is in an incomplete IMP, make sure to use Supplemental IMPsto specify any supplemental IMPs that contain assets referenced by the CPL. pub fn file_input(&self) -> ::std::option::Option<&str> { self.file_input.as_deref() } @@ -150,6 +152,10 @@ impl Input { pub fn supplemental_imps(&self) -> &[::std::string::String] { self.supplemental_imps.as_deref().unwrap_or_default() } + /// Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it + pub fn tams_settings(&self) -> ::std::option::Option<&crate::types::InputTamsSettings> { + self.tams_settings.as_ref() + } /// Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. 
pub fn timecode_source(&self) -> ::std::option::Option<&crate::types::InputTimecodeSource> { self.timecode_source.as_ref() @@ -205,6 +211,7 @@ pub struct InputBuilder { pub(crate) program_number: ::std::option::Option, pub(crate) psi_control: ::std::option::Option, pub(crate) supplemental_imps: ::std::option::Option<::std::vec::Vec<::std::string::String>>, + pub(crate) tams_settings: ::std::option::Option, pub(crate) timecode_source: ::std::option::Option, pub(crate) timecode_start: ::std::option::Option<::std::string::String>, pub(crate) video_generator: ::std::option::Option, @@ -406,17 +413,17 @@ impl InputBuilder { ) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::DynamicAudioSelector>> { &self.dynamic_audio_selectors } - /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL. + /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. For standard inputs, provide the path to your S3, HTTP, or HTTPS source file. For example, s3://amzn-s3-demo-bucket/input.mp4 for an Amazon S3 input or https://example.com/input.mp4 for an HTTPS input. For TAMS inputs, specify the HTTPS endpoint of your TAMS server. For example, https://tams-server.example.com . When you do, also specify Source ID, Timerange, GAP handling, and the Authorization connection ARN under TAMS settings. (Don't include these parameters in the Input file URL.) For IMF inputs, specify your input by providing the path to your CPL. For example, s3://amzn-s3-demo-bucket/vf/cpl.xml . If the CPL is in an incomplete IMP, make sure to use Supplemental IMPsto specify any supplemental IMPs that contain assets referenced by the CPL. pub fn file_input(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { self.file_input = ::std::option::Option::Some(input.into()); self } - /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL. + /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. For standard inputs, provide the path to your S3, HTTP, or HTTPS source file. For example, s3://amzn-s3-demo-bucket/input.mp4 for an Amazon S3 input or https://example.com/input.mp4 for an HTTPS input. For TAMS inputs, specify the HTTPS endpoint of your TAMS server. For example, https://tams-server.example.com . When you do, also specify Source ID, Timerange, GAP handling, and the Authorization connection ARN under TAMS settings. 
(Don't include these parameters in the Input file URL.) For IMF inputs, specify your input by providing the path to your CPL. For example, s3://amzn-s3-demo-bucket/vf/cpl.xml . If the CPL is in an incomplete IMP, make sure to use Supplemental IMPsto specify any supplemental IMPs that contain assets referenced by the CPL. pub fn set_file_input(mut self, input: ::std::option::Option<::std::string::String>) -> Self { self.file_input = input; self } - /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL. + /// Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. For standard inputs, provide the path to your S3, HTTP, or HTTPS source file. For example, s3://amzn-s3-demo-bucket/input.mp4 for an Amazon S3 input or https://example.com/input.mp4 for an HTTPS input. For TAMS inputs, specify the HTTPS endpoint of your TAMS server. For example, https://tams-server.example.com . When you do, also specify Source ID, Timerange, GAP handling, and the Authorization connection ARN under TAMS settings. (Don't include these parameters in the Input file URL.) For IMF inputs, specify your input by providing the path to your CPL. For example, s3://amzn-s3-demo-bucket/vf/cpl.xml . If the CPL is in an incomplete IMP, make sure to use Supplemental IMPsto specify any supplemental IMPs that contain assets referenced by the CPL. pub fn get_file_input(&self) -> &::std::option::Option<::std::string::String> { &self.file_input } @@ -558,6 +565,20 @@ impl InputBuilder { pub fn get_supplemental_imps(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> { &self.supplemental_imps } + /// Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it + pub fn tams_settings(mut self, input: crate::types::InputTamsSettings) -> Self { + self.tams_settings = ::std::option::Option::Some(input); + self + } + /// Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. 
This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it + pub fn set_tams_settings(mut self, input: ::std::option::Option) -> Self { + self.tams_settings = input; + self + } + /// Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it + pub fn get_tams_settings(&self) -> &::std::option::Option { + &self.tams_settings + } /// Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. pub fn timecode_source(mut self, input: crate::types::InputTimecodeSource) -> Self { self.timecode_source = ::std::option::Option::Some(input); @@ -658,6 +679,7 @@ impl InputBuilder { program_number: self.program_number, psi_control: self.psi_control, supplemental_imps: self.supplemental_imps, + tams_settings: self.tams_settings, timecode_source: self.timecode_source, timecode_start: self.timecode_start, video_generator: self.video_generator, diff --git a/sdk/mediaconvert/src/types/_input_tams_settings.rs b/sdk/mediaconvert/src/types/_input_tams_settings.rs new file mode 100644 index 000000000000..58d1d26abf5d --- /dev/null +++ b/sdk/mediaconvert/src/types/_input_tams_settings.rs @@ -0,0 +1,116 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +/// Specify a Time Addressable Media Store (TAMS) server as an input source. TAMS is an open-source API specification that provides access to time-segmented media content. Use TAMS to retrieve specific time ranges from live or archived media streams. When you specify TAMS settings, MediaConvert connects to your TAMS server, retrieves the media segments for your specified time range, and processes them as a single input. This enables workflows like extracting clips from live streams or processing specific portions of archived content. To use TAMS, you must: 1. Have access to a TAMS-compliant server 2. Specify the server URL in the Input file URL field 3. 
Provide the required SourceId and Timerange parameters 4. Configure authentication, if your TAMS server requires it +#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub struct InputTamsSettings { + /// Specify the ARN (Amazon Resource Name) of an EventBridge Connection to authenticate with your TAMS server. The EventBridge Connection stores your authentication credentials securely. MediaConvert assumes your job's IAM role to access this connection, so ensure the role has the events:RetrieveConnectionCredentials, secretsmanager:DescribeSecret, and secretsmanager:GetSecretValue permissions. Format: arn:aws:events:region:account-id:connection/connection-name/unique-id + pub auth_connection_arn: ::std::option::Option<::std::string::String>, + /// Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together. This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins. This maintains visual continuity during gaps. + pub gap_handling: ::std::option::Option, + /// Specify the unique identifier for the media source in your TAMS server. MediaConvert uses this source ID to locate the appropriate flows containing the media segments you want to process. The source ID corresponds to a specific media source registered in your TAMS server. This source must be of type urn:x-nmos:format:multi, and can reference multiple flows for audio, video, or combined audio/video content. MediaConvert automatically selects the highest quality flows available for your job. This setting is required when you include TAMS settings in your job. + pub source_id: ::std::option::Option<::std::string::String>, + /// Specify the time range of media segments to retrieve from your TAMS server. MediaConvert fetches only the segments that fall within this range. Use the format specified by your TAMS server implementation. This must be two timestamp values with the format {sign?}{seconds}:{nanoseconds}, separated by an underscore, surrounded by either parentheses or square brackets. Example: \[15:0_35:0) This setting is required when you include TAMS settings in your job. + pub timerange: ::std::option::Option<::std::string::String>, +} +impl InputTamsSettings { + /// Specify the ARN (Amazon Resource Name) of an EventBridge Connection to authenticate with your TAMS server. The EventBridge Connection stores your authentication credentials securely. MediaConvert assumes your job's IAM role to access this connection, so ensure the role has the events:RetrieveConnectionCredentials, secretsmanager:DescribeSecret, and secretsmanager:GetSecretValue permissions. Format: arn:aws:events:region:account-id:connection/connection-name/unique-id + pub fn auth_connection_arn(&self) -> ::std::option::Option<&str> { + self.auth_connection_arn.as_deref() + } + /// Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together.
This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins. This maintains visual continuity during gaps. + pub fn gap_handling(&self) -> ::std::option::Option<&crate::types::TamsGapHandling> { + self.gap_handling.as_ref() + } + /// Specify the unique identifier for the media source in your TAMS server. MediaConvert uses this source ID to locate the appropriate flows containing the media segments you want to process. The source ID corresponds to a specific media source registered in your TAMS server. This source must be of type urn:x-nmos:format:multi, and can reference multiple flows for audio, video, or combined audio/video content. MediaConvert automatically selects the highest quality flows available for your job. This setting is required when you include TAMS settings in your job. + pub fn source_id(&self) -> ::std::option::Option<&str> { + self.source_id.as_deref() + } + /// Specify the time range of media segments to retrieve from your TAMS server. MediaConvert fetches only the segments that fall within this range. Use the format specified by your TAMS server implementation. This must be two timestamp values with the format {sign?}{seconds}:{nanoseconds}, separated by an underscore, surrounded by either parentheses or square brackets. Example: \[15:0_35:0) This setting is required when you include TAMS settings in your job. + pub fn timerange(&self) -> ::std::option::Option<&str> { + self.timerange.as_deref() + } +} +impl InputTamsSettings { + /// Creates a new builder-style object to manufacture [`InputTamsSettings`](crate::types::InputTamsSettings). + pub fn builder() -> crate::types::builders::InputTamsSettingsBuilder { + crate::types::builders::InputTamsSettingsBuilder::default() + } +} + +/// A builder for [`InputTamsSettings`](crate::types::InputTamsSettings). +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct InputTamsSettingsBuilder { + pub(crate) auth_connection_arn: ::std::option::Option<::std::string::String>, + pub(crate) gap_handling: ::std::option::Option, + pub(crate) source_id: ::std::option::Option<::std::string::String>, + pub(crate) timerange: ::std::option::Option<::std::string::String>, +} +impl InputTamsSettingsBuilder { + /// Specify the ARN (Amazon Resource Name) of an EventBridge Connection to authenticate with your TAMS server. The EventBridge Connection stores your authentication credentials securely. MediaConvert assumes your job's IAM role to access this connection, so ensure the role has the events:RetrieveConnectionCredentials, secretsmanager:DescribeSecret, and secretsmanager:GetSecretValue permissions. Format: arn:aws:events:region:account-id:connection/connection-name/unique-id + pub fn auth_connection_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.auth_connection_arn = ::std::option::Option::Some(input.into()); + self + } + /// Specify the ARN (Amazon Resource Name) of an EventBridge Connection to authenticate with your TAMS server. The EventBridge Connection stores your authentication credentials securely.
MediaConvert assumes your job's IAM role to access this connection, so ensure the role has the events:RetrieveConnectionCredentials, secretsmanager:DescribeSecret, and secretsmanager:GetSecretValue permissions. Format: arn:aws:events:region:account-id:connection/connection-name/unique-id + pub fn set_auth_connection_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.auth_connection_arn = input; + self + } + /// Specify the ARN (Amazon Resource Name) of an EventBridge Connection to authenticate with your TAMS server. The EventBridge Connection stores your authentication credentials securely. MediaConvert assumes your job's IAM role to access this connection, so ensure the role has the events:RetrieveConnectionCredentials, secretsmanager:DescribeSecret, and secretsmanager:GetSecretValue permissions. Format: arn:aws:events:region:account-id:connection/connection-name/unique-id + pub fn get_auth_connection_arn(&self) -> &::std::option::Option<::std::string::String> { + &self.auth_connection_arn + } + /// Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together. This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins. This maintains visual continuity during gaps. + pub fn gap_handling(mut self, input: crate::types::TamsGapHandling) -> Self { + self.gap_handling = ::std::option::Option::Some(input); + self + } + /// Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together. This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins. This maintains visual continuity during gaps. + pub fn set_gap_handling(mut self, input: ::std::option::Option) -> Self { + self.gap_handling = input; + self + } + /// Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together. This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins. This maintains visual continuity during gaps. + pub fn get_gap_handling(&self) -> &::std::option::Option { + &self.gap_handling + } + /// Specify the unique identifier for the media source in your TAMS server. MediaConvert uses this source ID to locate the appropriate flows containing the media segments you want to process. 
The source ID corresponds to a specific media source registered in your TAMS server. This source must be of type urn:x-nmos:format:multi, and can reference multiple flows for audio, video, or combined audio/video content. MediaConvert automatically selects the highest quality flows available for your job. This setting is required when you include TAMS settings in your job. + pub fn source_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.source_id = ::std::option::Option::Some(input.into()); + self + } + /// Specify the unique identifier for the media source in your TAMS server. MediaConvert uses this source ID to locate the appropriate flows containing the media segments you want to process. The source ID corresponds to a specific media source registered in your TAMS server. This source must be of type urn:x-nmos:format:multi, and can reference multiple flows for audio, video, or combined audio/video content. MediaConvert automatically selects the highest quality flows available for your job. This setting is required when you include TAMS settings in your job. + pub fn set_source_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.source_id = input; + self + } + /// Specify the unique identifier for the media source in your TAMS server. MediaConvert uses this source ID to locate the appropriate flows containing the media segments you want to process. The source ID corresponds to a specific media source registered in your TAMS server. This source must be of type urn:x-nmos:format:multi, and can reference multiple flows for audio, video, or combined audio/video content. MediaConvert automatically selects the highest quality flows available for your job. This setting is required when you include TAMS settings in your job. + pub fn get_source_id(&self) -> &::std::option::Option<::std::string::String> { + &self.source_id + } + /// Specify the time range of media segments to retrieve from your TAMS server. MediaConvert fetches only the segments that fall within this range. Use the format specified by your TAMS server implementation. This must be two timestamp values with the format {sign?}{seconds}:{nanoseconds}, separated by an underscore, surrounded by either parentheses or square brackets. Example: \[15:0_35:0) This setting is required when you include TAMS settings in your job. + pub fn timerange(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.timerange = ::std::option::Option::Some(input.into()); + self + } + /// Specify the time range of media segments to retrieve from your TAMS server. MediaConvert fetches only the segments that fall within this range. Use the format specified by your TAMS server implementation. This must be two timestamp values with the format {sign?}{seconds}:{nanoseconds}, separated by an underscore, surrounded by either parentheses or square brackets. Example: \[15:0_35:0) This setting is required when you include TAMS settings in your job. + pub fn set_timerange(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.timerange = input; + self + } + /// Specify the time range of media segments to retrieve from your TAMS server. MediaConvert fetches only the segments that fall within this range. Use the format specified by your TAMS server implementation. This must be two timestamp values with the format {sign?}{seconds}:{nanoseconds}, separated by an underscore, surrounded by either parentheses or square brackets.
Example: \[15:0_35:0) This setting is required when you include TAMS settings in your job. + pub fn get_timerange(&self) -> &::std::option::Option<::std::string::String> { + &self.timerange + } + /// Consumes the builder and constructs a [`InputTamsSettings`](crate::types::InputTamsSettings). + pub fn build(self) -> crate::types::InputTamsSettings { + crate::types::InputTamsSettings { + auth_connection_arn: self.auth_connection_arn, + gap_handling: self.gap_handling, + source_id: self.source_id, + timerange: self.timerange, + } + } +} diff --git a/sdk/mediaconvert/src/types/_tams_gap_handling.rs b/sdk/mediaconvert/src/types/_tams_gap_handling.rs new file mode 100644 index 000000000000..1592f10415ee --- /dev/null +++ b/sdk/mediaconvert/src/types/_tams_gap_handling.rs @@ -0,0 +1,114 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +/// When writing a match expression against `TamsGapHandling`, it is important to ensure +/// your code is forward-compatible. That is, if a match arm handles a case for a +/// feature that is supported by the service but has not been represented as an enum +/// variant in a current version of SDK, your code should continue to work when you +/// upgrade SDK to a future version in which the enum does include a variant for that +/// feature. +/// +/// Here is an example of how you can make a match expression forward-compatible: +/// +/// ```text +/// # let tamsgaphandling = unimplemented!(); +/// match tamsgaphandling { +/// TamsGapHandling::FillWithBlack => { /* ... */ }, +/// TamsGapHandling::HoldLastFrame => { /* ... */ }, +/// TamsGapHandling::SkipGaps => { /* ... */ }, +/// other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ }, +/// _ => { /* ... */ }, +/// } +/// ``` +/// The above code demonstrates that when `tamsgaphandling` represents +/// `NewFeature`, the execution path will lead to the second last match arm, +/// even though the enum does not contain a variant `TamsGapHandling::NewFeature` +/// in the current version of SDK. The reason is that the variable `other`, +/// created by the `@` operator, is bound to +/// `TamsGapHandling::Unknown(UnknownVariantValue("NewFeature".to_owned()))` +/// and calling `as_str` on it yields `"NewFeature"`. +/// This match expression is forward-compatible when executed with a newer +/// version of SDK where the variant `TamsGapHandling::NewFeature` is defined. +/// Specifically, when `tamsgaphandling` represents `NewFeature`, +/// the execution path will hit the second last match arm as before by virtue of +/// calling `as_str` on `TamsGapHandling::NewFeature` also yielding `"NewFeature"`. +/// +/// Explicitly matching on the `Unknown` variant should +/// be avoided for two reasons: +/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted. +/// - It might inadvertently shadow other intended match arms. +/// +/// Specify how MediaConvert handles gaps between media segments in your TAMS source. Gaps can occur in live streams due to network issues or other interruptions. Choose from the following options: * Skip gaps - Default. Skip over gaps and join segments together. This creates a continuous output with no blank frames, but may cause timeline discontinuities. * Fill with black - Insert black frames to fill gaps between segments. This maintains timeline continuity but adds black frames where content is missing. * Hold last frame - Repeat the last frame before a gap until the next segment begins.
This maintains visual continuity during gaps. +#[non_exhaustive] +#[derive( + ::std::clone::Clone, ::std::cmp::Eq, ::std::cmp::Ord, ::std::cmp::PartialEq, ::std::cmp::PartialOrd, ::std::fmt::Debug, ::std::hash::Hash, +)] +pub enum TamsGapHandling { + #[allow(missing_docs)] // documentation missing in model + FillWithBlack, + #[allow(missing_docs)] // documentation missing in model + HoldLastFrame, + #[allow(missing_docs)] // documentation missing in model + SkipGaps, + /// `Unknown` contains new variants that have been added since this code was generated. + #[deprecated(note = "Don't directly match on `Unknown`. See the docs on this enum for the correct way to handle unknown variants.")] + Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue), +} +impl ::std::convert::From<&str> for TamsGapHandling { + fn from(s: &str) -> Self { + match s { + "FILL_WITH_BLACK" => TamsGapHandling::FillWithBlack, + "HOLD_LAST_FRAME" => TamsGapHandling::HoldLastFrame, + "SKIP_GAPS" => TamsGapHandling::SkipGaps, + other => TamsGapHandling::Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue(other.to_owned())), + } + } +} +impl ::std::str::FromStr for TamsGapHandling { + type Err = ::std::convert::Infallible; + + fn from_str(s: &str) -> ::std::result::Result::Err> { + ::std::result::Result::Ok(TamsGapHandling::from(s)) + } +} +impl TamsGapHandling { + /// Returns the `&str` value of the enum member. + pub fn as_str(&self) -> &str { + match self { + TamsGapHandling::FillWithBlack => "FILL_WITH_BLACK", + TamsGapHandling::HoldLastFrame => "HOLD_LAST_FRAME", + TamsGapHandling::SkipGaps => "SKIP_GAPS", + TamsGapHandling::Unknown(value) => value.as_str(), + } + } + /// Returns all the `&str` representations of the enum members. + pub const fn values() -> &'static [&'static str] { + &["FILL_WITH_BLACK", "HOLD_LAST_FRAME", "SKIP_GAPS"] + } +} +impl ::std::convert::AsRef for TamsGapHandling { + fn as_ref(&self) -> &str { + self.as_str() + } +} +impl TamsGapHandling { + /// Parses the enum value while disallowing unknown variants. + /// + /// Unknown variants will result in an error. + pub fn try_parse(value: &str) -> ::std::result::Result { + match Self::from(value) { + #[allow(deprecated)] + Self::Unknown(_) => ::std::result::Result::Err(crate::error::UnknownVariantError::new(value)), + known => Ok(known), + } + } +} +impl ::std::fmt::Display for TamsGapHandling { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match self { + TamsGapHandling::FillWithBlack => write!(f, "FILL_WITH_BLACK"), + TamsGapHandling::HoldLastFrame => write!(f, "HOLD_LAST_FRAME"), + TamsGapHandling::SkipGaps => write!(f, "SKIP_GAPS"), + TamsGapHandling::Unknown(value) => write!(f, "{}", value), + } + } +} diff --git a/sdk/mediaconvert/src/types/_video_overlay_crop.rs b/sdk/mediaconvert/src/types/_video_overlay_crop.rs index 282b0e547781..c1e92a4964a8 100644 --- a/sdk/mediaconvert/src/types/_video_overlay_crop.rs +++ b/sdk/mediaconvert/src/types/_video_overlay_crop.rs @@ -4,11 +4,11 @@ #[non_exhaustive] #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] pub struct VideoOverlayCrop { - /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels high. 
When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. + /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. pub height: ::std::option::Option, /// Specify the Unit type to use when you enter a value for X position, Y position, Width, or Height. You can choose Pixels or Percentage. Leave blank to use the default value, Pixels. pub unit: ::std::option::Option, - /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. + /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. pub width: ::std::option::Option, /// Specify the distance between the cropping rectangle and the left edge of your overlay video's frame. To position the cropping rectangle along the left edge: Keep blank, or enter 0. To position the cropping rectangle to the right, relative to the left edge of your overlay video's frame: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 10 and choose Pixels, the cropping rectangle will be positioned 10 pixels from the left edge of the overlay video's frame. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be positioned 192 pixels from the left edge of the overlay video's frame. pub x: ::std::option::Option, @@ -16,7 +16,7 @@ pub struct VideoOverlayCrop { pub y: ::std::option::Option, } impl VideoOverlayCrop { - /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. + /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. 
To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. pub fn height(&self) -> ::std::option::Option { self.height } @@ -24,7 +24,7 @@ impl VideoOverlayCrop { pub fn unit(&self) -> ::std::option::Option<&crate::types::VideoOverlayUnit> { self.unit.as_ref() } - /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. + /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. pub fn width(&self) -> ::std::option::Option { self.width } @@ -55,17 +55,17 @@ pub struct VideoOverlayCropBuilder { pub(crate) y: ::std::option::Option, } impl VideoOverlayCropBuilder { - /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. + /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. pub fn height(mut self, input: i32) -> Self { self.height = ::std::option::Option::Some(input); self } - /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. + /// Specify the height of the video overlay cropping rectangle. 
To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. pub fn set_height(mut self, input: ::std::option::Option) -> Self { self.height = input; self } - /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. + /// Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high. pub fn get_height(&self) -> &::std::option::Option { &self.height } @@ -83,17 +83,17 @@ impl VideoOverlayCropBuilder { pub fn get_unit(&self) -> &::std::option::Option { &self.unit } - /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. + /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. pub fn width(mut self, input: i32) -> Self { self.width = ::std::option::Option::Some(input); self } - /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. + /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. 
To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. pub fn set_width(mut self, input: ::std::option::Option) -> Self { self.width = input; self } - /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. + /// Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will be 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide. pub fn get_width(&self) -> &::std::option::Option { &self.width } diff --git a/sdk/mediaconvert/src/types/builders.rs b/sdk/mediaconvert/src/types/builders.rs index 4beaa6407853..276d5291d488 100644 --- a/sdk/mediaconvert/src/types/builders.rs +++ b/sdk/mediaconvert/src/types/builders.rs @@ -357,6 +357,8 @@ pub use crate::types::_input::InputBuilder; pub use crate::types::_input_video_generator::InputVideoGeneratorBuilder; +pub use crate::types::_input_tams_settings::InputTamsSettingsBuilder; + pub use crate::types::_input_decryption_settings::InputDecryptionSettingsBuilder; pub use crate::types::_queue_transition::QueueTransitionBuilder; diff --git a/sdk/outposts/Cargo.toml b/sdk/outposts/Cargo.toml index 10d00f8e398c..0c9e267ec6ae 100644 --- a/sdk/outposts/Cargo.toml +++ b/sdk/outposts/Cargo.toml @@ -1,7 +1,7 @@ # Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. 
[package] name = "aws-sdk-outposts" -version = "1.84.0" +version = "1.85.0" authors = ["AWS Rust SDK Team ", "Russell Cohen "] description = "AWS SDK for AWS Outposts" edition = "2021" diff --git a/sdk/outposts/README.md b/sdk/outposts/README.md index 5b842221fa64..eceddd389917 100644 --- a/sdk/outposts/README.md +++ b/sdk/outposts/README.md @@ -14,7 +14,7 @@ your project, add the following to your **Cargo.toml** file: ```toml [dependencies] aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-outposts = "1.84.0" +aws-sdk-outposts = "1.85.0" tokio = { version = "1", features = ["full"] } ``` diff --git a/sdk/outposts/src/client.rs b/sdk/outposts/src/client.rs index d8c2b77f7c6f..af43f016c2b2 100644 --- a/sdk/outposts/src/client.rs +++ b/sdk/outposts/src/client.rs @@ -187,6 +187,8 @@ mod get_order; mod get_outpost; +mod get_outpost_billing_information; + mod get_outpost_instance_types; mod get_outpost_supported_instance_types; diff --git a/sdk/outposts/src/client/get_outpost_billing_information.rs b/sdk/outposts/src/client/get_outpost_billing_information.rs new file mode 100644 index 000000000000..a3f927285e8a --- /dev/null +++ b/sdk/outposts/src/client/get_outpost_billing_information.rs @@ -0,0 +1,20 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +impl super::Client { + /// Constructs a fluent builder for the [`GetOutpostBillingInformation`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder) operation. + /// This operation supports pagination; See [`into_paginator()`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::into_paginator). + /// + /// - The fluent builder is configurable: + /// - [`next_token(impl Into)`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::next_token) / [`set_next_token(Option)`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::set_next_token):
required: **false**

The pagination token.


+ /// - [`max_results(i32)`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::max_results) / [`set_max_results(Option)`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::set_max_results):
required: **false**

The maximum page size.


+ /// - [`outpost_identifier(impl Into)`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::outpost_identifier) / [`set_outpost_identifier(Option)`](crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::set_outpost_identifier):
required: **true**

The ID or ARN of the Outpost.


+ /// - On success, responds with [`GetOutpostBillingInformationOutput`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput) with field(s): + /// - [`next_token(Option)`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput::next_token):

The pagination token.

+ /// - [`subscriptions(Option>)`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput::subscriptions):

The subscription details for the specified Outpost.

+ /// - [`contract_end_date(Option)`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput::contract_end_date):

The date the current contract term ends for the specified Outpost. You must start the renewal or decommission process at least 5 business days before the current term for your Amazon Web Services Outposts ends. Failing to complete these steps at least 5 business days before the current term ends might result in unanticipated charges.

+ /// - On failure, responds with [`SdkError`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError) + pub fn get_outpost_billing_information( + &self, + ) -> crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder { + crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationFluentBuilder::new(self.handle.clone()) + } +} diff --git a/sdk/outposts/src/error_meta.rs b/sdk/outposts/src/error_meta.rs index bad52eb7f6ea..65bd9de22542 100644 --- a/sdk/outposts/src/error_meta.rs +++ b/sdk/outposts/src/error_meta.rs @@ -290,6 +290,7 @@ where impl From for Error { fn from(err: crate::operation::get_catalog_item::GetCatalogItemError) -> Self { match err { + crate::operation::get_catalog_item::GetCatalogItemError::AccessDeniedException(inner) => Error::AccessDeniedException(inner), crate::operation::get_catalog_item::GetCatalogItemError::InternalServerException(inner) => Error::InternalServerException(inner), crate::operation::get_catalog_item::GetCatalogItemError::NotFoundException(inner) => Error::NotFoundException(inner), crate::operation::get_catalog_item::GetCatalogItemError::ValidationException(inner) => Error::ValidationException(inner), @@ -371,6 +372,43 @@ impl From for Error { } } } +impl + From<::aws_smithy_runtime_api::client::result::SdkError> + for Error +where + R: Send + Sync + std::fmt::Debug + 'static, +{ + fn from( + err: ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + R, + >, + ) -> Self { + match err { + ::aws_smithy_runtime_api::client::result::SdkError::ServiceError(context) => Self::from(context.into_err()), + _ => Error::Unhandled(crate::error::sealed_unhandled::Unhandled { + meta: ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(&err).clone(), + source: err.into(), + }), + } + } +} +impl From for Error { + fn from(err: crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError) -> Self { + match err { + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::AccessDeniedException(inner) => { + Error::AccessDeniedException(inner) + } + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::InternalServerException(inner) => { + Error::InternalServerException(inner) + } + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::NotFoundException(inner) => { + Error::NotFoundException(inner) + } + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::Unhandled(inner) => Error::Unhandled(inner), + } + } +} impl From<::aws_smithy_runtime_api::client::result::SdkError> for Error where @@ -639,6 +677,7 @@ where impl From for Error { fn from(err: crate::operation::list_catalog_items::ListCatalogItemsError) -> Self { match err { + crate::operation::list_catalog_items::ListCatalogItemsError::AccessDeniedException(inner) => Error::AccessDeniedException(inner), crate::operation::list_catalog_items::ListCatalogItemsError::InternalServerException(inner) => Error::InternalServerException(inner), crate::operation::list_catalog_items::ListCatalogItemsError::NotFoundException(inner) => Error::NotFoundException(inner), crate::operation::list_catalog_items::ListCatalogItemsError::ValidationException(inner) => Error::ValidationException(inner), diff --git a/sdk/outposts/src/lens.rs b/sdk/outposts/src/lens.rs index 
798d19dd3b80..46a1580101c9 100644 --- a/sdk/outposts/src/lens.rs +++ b/sdk/outposts/src/lens.rs @@ -1,4 +1,14 @@ // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub(crate) fn reflens_get_outpost_billing_information_output_output_next_token( + input: &crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, +) -> ::std::option::Option<&::std::string::String> { + let input = match &input.next_token { + ::std::option::Option::None => return ::std::option::Option::None, + ::std::option::Option::Some(t) => t, + }; + ::std::option::Option::Some(input) +} + pub(crate) fn reflens_get_outpost_instance_types_output_output_next_token( input: &crate::operation::get_outpost_instance_types::GetOutpostInstanceTypesOutput, ) -> ::std::option::Option<&::std::string::String> { @@ -99,6 +109,13 @@ pub(crate) fn reflens_list_sites_output_output_next_token( ::std::option::Option::Some(input) } +pub(crate) fn lens_get_outpost_billing_information_output_output_subscriptions( + input: crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, +) -> ::std::option::Option<::std::vec::Vec> { + let input = input.subscriptions?; + ::std::option::Option::Some(input) +} + pub(crate) fn lens_get_outpost_instance_types_output_output_instance_types( input: crate::operation::get_outpost_instance_types::GetOutpostInstanceTypesOutput, ) -> ::std::option::Option<::std::vec::Vec> { diff --git a/sdk/outposts/src/lib.rs b/sdk/outposts/src/lib.rs index ff5b992bc571..2fa2969898f8 100644 --- a/sdk/outposts/src/lib.rs +++ b/sdk/outposts/src/lib.rs @@ -32,7 +32,7 @@ //! ```toml //! [dependencies] //! aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -//! aws-sdk-outposts = "1.84.0" +//! aws-sdk-outposts = "1.85.0" //! tokio = { version = "1", features = ["full"] } //! ``` //! diff --git a/sdk/outposts/src/operation.rs b/sdk/outposts/src/operation.rs index f3db85b5b4d3..b26d6dc6aa1b 100644 --- a/sdk/outposts/src/operation.rs +++ b/sdk/outposts/src/operation.rs @@ -37,6 +37,9 @@ pub mod get_order; /// Types for the `GetOutpost` operation. pub mod get_outpost; +/// Types for the `GetOutpostBillingInformation` operation. +pub mod get_outpost_billing_information; + /// Types for the `GetOutpostInstanceTypes` operation. pub mod get_outpost_instance_types; diff --git a/sdk/outposts/src/operation/get_catalog_item.rs b/sdk/outposts/src/operation/get_catalog_item.rs index 568b48bbfdf0..9f6113614db8 100644 --- a/sdk/outposts/src/operation/get_catalog_item.rs +++ b/sdk/outposts/src/operation/get_catalog_item.rs @@ -261,6 +261,8 @@ impl ::aws_smithy_runtime_api::client::interceptors::Intercept for GetCatalogIte #[non_exhaustive] #[derive(::std::fmt::Debug)] pub enum GetCatalogItemError { + ///

You do not have permission to perform this operation.

+ AccessDeniedException(crate::types::error::AccessDeniedException), ///

An internal error has occurred.

InternalServerException(crate::types::error::InternalServerException), ///

The specified request is not valid.

@@ -300,12 +302,17 @@ impl GetCatalogItemError { /// pub fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { match self { + Self::AccessDeniedException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::InternalServerException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::NotFoundException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::ValidationException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::Unhandled(e) => &e.meta, } } + /// Returns `true` if the error kind is `GetCatalogItemError::AccessDeniedException`. + pub fn is_access_denied_exception(&self) -> bool { + matches!(self, Self::AccessDeniedException(_)) + } /// Returns `true` if the error kind is `GetCatalogItemError::InternalServerException`. pub fn is_internal_server_exception(&self) -> bool { matches!(self, Self::InternalServerException(_)) @@ -322,6 +329,7 @@ impl GetCatalogItemError { impl ::std::error::Error for GetCatalogItemError { fn source(&self) -> ::std::option::Option<&(dyn ::std::error::Error + 'static)> { match self { + Self::AccessDeniedException(_inner) => ::std::option::Option::Some(_inner), Self::InternalServerException(_inner) => ::std::option::Option::Some(_inner), Self::NotFoundException(_inner) => ::std::option::Option::Some(_inner), Self::ValidationException(_inner) => ::std::option::Option::Some(_inner), @@ -332,6 +340,7 @@ impl ::std::error::Error for GetCatalogItemError { impl ::std::fmt::Display for GetCatalogItemError { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match self { + Self::AccessDeniedException(_inner) => _inner.fmt(f), Self::InternalServerException(_inner) => _inner.fmt(f), Self::NotFoundException(_inner) => _inner.fmt(f), Self::ValidationException(_inner) => _inner.fmt(f), @@ -356,6 +365,7 @@ impl ::aws_smithy_types::retry::ProvideErrorKind for GetCatalogItemError { impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for GetCatalogItemError { fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { match self { + Self::AccessDeniedException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::InternalServerException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::NotFoundException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::ValidationException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), diff --git a/sdk/outposts/src/operation/get_outpost_billing_information.rs b/sdk/outposts/src/operation/get_outpost_billing_information.rs new file mode 100644 index 000000000000..c0a4792f3c6f --- /dev/null +++ b/sdk/outposts/src/operation/get_outpost_billing_information.rs @@ -0,0 +1,424 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +/// Orchestration and serialization glue logic for `GetOutpostBillingInformation`. 
+#[derive(::std::clone::Clone, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct GetOutpostBillingInformation; +impl GetOutpostBillingInformation { + /// Creates a new `GetOutpostBillingInformation` + pub fn new() -> Self { + Self + } + pub(crate) async fn orchestrate( + runtime_plugins: &::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins, + input: crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput, + ) -> ::std::result::Result< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let map_err = |err: ::aws_smithy_runtime_api::client::result::SdkError< + ::aws_smithy_runtime_api::client::interceptors::context::Error, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >| { + err.map_service_error(|err| { + err.downcast::() + .expect("correct error type") + }) + }; + let context = Self::orchestrate_with_stop_point(runtime_plugins, input, ::aws_smithy_runtime::client::orchestrator::StopPoint::None) + .await + .map_err(map_err)?; + let output = context.finalize().map_err(map_err)?; + ::std::result::Result::Ok( + output + .downcast::() + .expect("correct output type"), + ) + } + + pub(crate) async fn orchestrate_with_stop_point( + runtime_plugins: &::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins, + input: crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput, + stop_point: ::aws_smithy_runtime::client::orchestrator::StopPoint, + ) -> ::std::result::Result< + ::aws_smithy_runtime_api::client::interceptors::context::InterceptorContext, + ::aws_smithy_runtime_api::client::result::SdkError< + ::aws_smithy_runtime_api::client::interceptors::context::Error, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let input = ::aws_smithy_runtime_api::client::interceptors::context::Input::erase(input); + use ::tracing::Instrument; + ::aws_smithy_runtime::client::orchestrator::invoke_with_stop_point( + "Outposts", + "GetOutpostBillingInformation", + input, + runtime_plugins, + stop_point, + ) + // Create a parent span for the entire operation. Includes a random, internal-only, + // seven-digit ID for the operation orchestration so that it can be correlated in the logs. 
+ .instrument(::tracing::debug_span!( + "Outposts.GetOutpostBillingInformation", + "rpc.service" = "Outposts", + "rpc.method" = "GetOutpostBillingInformation", + "sdk_invocation_id" = ::fastrand::u32(1_000_000..10_000_000), + "rpc.system" = "aws-api", + )) + .await + } + + pub(crate) fn operation_runtime_plugins( + client_runtime_plugins: ::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins, + client_config: &crate::config::Config, + config_override: ::std::option::Option, + ) -> ::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugins { + let mut runtime_plugins = client_runtime_plugins.with_operation_plugin(Self::new()); + + if let ::std::option::Option::Some(config_override) = config_override { + for plugin in config_override.runtime_plugins.iter().cloned() { + runtime_plugins = runtime_plugins.with_operation_plugin(plugin); + } + runtime_plugins = runtime_plugins.with_operation_plugin(crate::config::ConfigOverrideRuntimePlugin::new( + config_override, + client_config.config.clone(), + &client_config.runtime_components, + )); + } + runtime_plugins + } +} +impl ::aws_smithy_runtime_api::client::runtime_plugin::RuntimePlugin for GetOutpostBillingInformation { + fn config(&self) -> ::std::option::Option<::aws_smithy_types::config_bag::FrozenLayer> { + let mut cfg = ::aws_smithy_types::config_bag::Layer::new("GetOutpostBillingInformation"); + + cfg.store_put(::aws_smithy_runtime_api::client::ser_de::SharedRequestSerializer::new( + GetOutpostBillingInformationRequestSerializer, + )); + cfg.store_put(::aws_smithy_runtime_api::client::ser_de::SharedResponseDeserializer::new( + GetOutpostBillingInformationResponseDeserializer, + )); + + cfg.store_put(::aws_smithy_runtime_api::client::auth::AuthSchemeOptionResolverParams::new( + crate::config::auth::Params::builder() + .operation_name("GetOutpostBillingInformation") + .build() + .expect("required fields set"), + )); + + cfg.store_put(::aws_smithy_runtime_api::client::orchestrator::Metadata::new( + "GetOutpostBillingInformation", + "Outposts", + )); + let mut signing_options = ::aws_runtime::auth::SigningOptions::default(); + signing_options.double_uri_encode = true; + signing_options.content_sha256_header = false; + signing_options.normalize_uri_path = true; + signing_options.payload_override = None; + + cfg.store_put(::aws_runtime::auth::SigV4OperationSigningConfig { + signing_options, + ..::std::default::Default::default() + }); + + ::std::option::Option::Some(cfg.freeze()) + } + + fn runtime_components( + &self, + _: &::aws_smithy_runtime_api::client::runtime_components::RuntimeComponentsBuilder, + ) -> ::std::borrow::Cow<'_, ::aws_smithy_runtime_api::client::runtime_components::RuntimeComponentsBuilder> { + #[allow(unused_mut)] + let mut rcb = ::aws_smithy_runtime_api::client::runtime_components::RuntimeComponentsBuilder::new("GetOutpostBillingInformation") + .with_interceptor(::aws_smithy_runtime::client::stalled_stream_protection::StalledStreamProtectionInterceptor::default()) + .with_interceptor(GetOutpostBillingInformationEndpointParamsInterceptor) + .with_retry_classifier(::aws_smithy_runtime::client::retries::classifiers::TransientErrorClassifier::< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + >::new()) + .with_retry_classifier(::aws_smithy_runtime::client::retries::classifiers::ModeledAsRetryableClassifier::< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + >::new()) + 
.with_retry_classifier(::aws_runtime::retries::classifiers::AwsErrorCodeClassifier::< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + >::new()); + + ::std::borrow::Cow::Owned(rcb) + } +} + +#[derive(Debug)] +struct GetOutpostBillingInformationResponseDeserializer; +impl ::aws_smithy_runtime_api::client::ser_de::DeserializeResponse for GetOutpostBillingInformationResponseDeserializer { + fn deserialize_nonstreaming( + &self, + response: &::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + ) -> ::aws_smithy_runtime_api::client::interceptors::context::OutputOrError { + let (success, status) = (response.status().is_success(), response.status().as_u16()); + let headers = response.headers(); + let body = response.body().bytes().expect("body loaded"); + #[allow(unused_mut)] + let mut force_error = false; + ::tracing::debug!(request_id = ?::aws_types::request_id::RequestId::request_id(response)); + let parse_result = if !success && status != 200 || force_error { + crate::protocol_serde::shape_get_outpost_billing_information::de_get_outpost_billing_information_http_error(status, headers, body) + } else { + crate::protocol_serde::shape_get_outpost_billing_information::de_get_outpost_billing_information_http_response(status, headers, body) + }; + crate::protocol_serde::type_erase_result(parse_result) + } +} +#[derive(Debug)] +struct GetOutpostBillingInformationRequestSerializer; +impl ::aws_smithy_runtime_api::client::ser_de::SerializeRequest for GetOutpostBillingInformationRequestSerializer { + #[allow(unused_mut, clippy::let_and_return, clippy::needless_borrow, clippy::useless_conversion)] + fn serialize_input( + &self, + input: ::aws_smithy_runtime_api::client::interceptors::context::Input, + _cfg: &mut ::aws_smithy_types::config_bag::ConfigBag, + ) -> ::std::result::Result<::aws_smithy_runtime_api::client::orchestrator::HttpRequest, ::aws_smithy_runtime_api::box_error::BoxError> { + let input = input + .downcast::() + .expect("correct type"); + let _header_serialization_settings = _cfg + .load::() + .cloned() + .unwrap_or_default(); + let mut request_builder = { + fn uri_base( + _input: &crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput, + output: &mut ::std::string::String, + ) -> ::std::result::Result<(), ::aws_smithy_types::error::operation::BuildError> { + use ::std::fmt::Write as _; + let input_1 = &_input.outpost_identifier; + let input_1 = input_1.as_ref().ok_or_else(|| { + ::aws_smithy_types::error::operation::BuildError::missing_field("outpost_identifier", "cannot be empty or unset") + })?; + let outpost_identifier = ::aws_smithy_http::label::fmt_string(input_1, ::aws_smithy_http::label::EncodingStrategy::Default); + if outpost_identifier.is_empty() { + return ::std::result::Result::Err(::aws_smithy_types::error::operation::BuildError::missing_field( + "outpost_identifier", + "cannot be empty or unset", + )); + } + ::std::write!( + output, + "/outpost/{OutpostIdentifier}/billing-information", + OutpostIdentifier = outpost_identifier + ) + .expect("formatting should succeed"); + ::std::result::Result::Ok(()) + } + fn uri_query( + _input: &crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput, + mut output: &mut ::std::string::String, + ) -> ::std::result::Result<(), ::aws_smithy_types::error::operation::BuildError> { + let mut query = ::aws_smithy_http::query::Writer::new(output); + if let ::std::option::Option::Some(inner_2) = &_input.next_token { + { + 
query.push_kv("NextToken", &::aws_smithy_http::query::fmt_string(inner_2)); + } + } + if let ::std::option::Option::Some(inner_3) = &_input.max_results { + { + query.push_kv("MaxResults", ::aws_smithy_types::primitive::Encoder::from(*inner_3).encode()); + } + } + ::std::result::Result::Ok(()) + } + #[allow(clippy::unnecessary_wraps)] + fn update_http_builder( + input: &crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput, + builder: ::http::request::Builder, + ) -> ::std::result::Result<::http::request::Builder, ::aws_smithy_types::error::operation::BuildError> { + let mut uri = ::std::string::String::new(); + uri_base(input, &mut uri)?; + uri_query(input, &mut uri)?; + ::std::result::Result::Ok(builder.method("GET").uri(uri)) + } + let mut builder = update_http_builder(&input, ::http::request::Builder::new())?; + builder + }; + let body = ::aws_smithy_types::body::SdkBody::from(""); + + ::std::result::Result::Ok(request_builder.body(body).expect("valid request").try_into().unwrap()) + } +} +#[derive(Debug)] +struct GetOutpostBillingInformationEndpointParamsInterceptor; + +impl ::aws_smithy_runtime_api::client::interceptors::Intercept for GetOutpostBillingInformationEndpointParamsInterceptor { + fn name(&self) -> &'static str { + "GetOutpostBillingInformationEndpointParamsInterceptor" + } + + fn read_before_execution( + &self, + context: &::aws_smithy_runtime_api::client::interceptors::context::BeforeSerializationInterceptorContextRef< + '_, + ::aws_smithy_runtime_api::client::interceptors::context::Input, + ::aws_smithy_runtime_api::client::interceptors::context::Output, + ::aws_smithy_runtime_api::client::interceptors::context::Error, + >, + cfg: &mut ::aws_smithy_types::config_bag::ConfigBag, + ) -> ::std::result::Result<(), ::aws_smithy_runtime_api::box_error::BoxError> { + let _input = context + .input() + .downcast_ref::() + .ok_or("failed to downcast to GetOutpostBillingInformationInput")?; + + let params = crate::config::endpoint::Params::builder() + .set_region(cfg.load::<::aws_types::region::Region>().map(|r| r.as_ref().to_owned())) + .set_use_dual_stack(cfg.load::<::aws_types::endpoint_config::UseDualStack>().map(|ty| ty.0)) + .set_use_fips(cfg.load::<::aws_types::endpoint_config::UseFips>().map(|ty| ty.0)) + .set_endpoint(cfg.load::<::aws_types::endpoint_config::EndpointUrl>().map(|ty| ty.0.clone())) + .build() + .map_err(|err| { + ::aws_smithy_runtime_api::client::interceptors::error::ContextAttachedError::new("endpoint params could not be built", err) + })?; + cfg.interceptor_state() + .store_put(::aws_smithy_runtime_api::client::endpoint::EndpointResolverParams::new(params)); + ::std::result::Result::Ok(()) + } +} + +// The get_* functions below are generated from JMESPath expressions in the +// operationContextParams trait. They target the operation's input shape. + +/// Error type for the `GetOutpostBillingInformationError` operation. +#[non_exhaustive] +#[derive(::std::fmt::Debug)] +pub enum GetOutpostBillingInformationError { + ///

<p>You do not have permission to perform this operation.</p>
+ AccessDeniedException(crate::types::error::AccessDeniedException), + ///

<p>An internal error has occurred.</p>
+ InternalServerException(crate::types::error::InternalServerException), + ///

<p>The specified request is not valid.</p>
+ NotFoundException(crate::types::error::NotFoundException), + /// An unexpected error occurred (e.g., invalid JSON returned by the service or an unknown error code). + #[deprecated(note = "Matching `Unhandled` directly is not forwards compatible. Instead, match using a \ + variable wildcard pattern and check `.code()`: + \ +    `err if err.code() == Some(\"SpecificExceptionCode\") => { /* handle the error */ }` + \ + See [`ProvideErrorMetadata`](#impl-ProvideErrorMetadata-for-GetOutpostBillingInformationError) for what information is available for the error.")] + Unhandled(crate::error::sealed_unhandled::Unhandled), +} +impl GetOutpostBillingInformationError { + /// Creates the `GetOutpostBillingInformationError::Unhandled` variant from any error type. + pub fn unhandled( + err: impl ::std::convert::Into<::std::boxed::Box>, + ) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source: err.into(), + meta: ::std::default::Default::default(), + }) + } + + /// Creates the `GetOutpostBillingInformationError::Unhandled` variant from an [`ErrorMetadata`](::aws_smithy_types::error::ErrorMetadata). + pub fn generic(err: ::aws_smithy_types::error::ErrorMetadata) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source: err.clone().into(), + meta: err, + }) + } + /// + /// Returns error metadata, which includes the error code, message, + /// request ID, and potentially additional information. + /// + pub fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { + match self { + Self::AccessDeniedException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::InternalServerException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::NotFoundException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), + Self::Unhandled(e) => &e.meta, + } + } + /// Returns `true` if the error kind is `GetOutpostBillingInformationError::AccessDeniedException`. + pub fn is_access_denied_exception(&self) -> bool { + matches!(self, Self::AccessDeniedException(_)) + } + /// Returns `true` if the error kind is `GetOutpostBillingInformationError::InternalServerException`. + pub fn is_internal_server_exception(&self) -> bool { + matches!(self, Self::InternalServerException(_)) + } + /// Returns `true` if the error kind is `GetOutpostBillingInformationError::NotFoundException`. 
+ pub fn is_not_found_exception(&self) -> bool { + matches!(self, Self::NotFoundException(_)) + } +} +impl ::std::error::Error for GetOutpostBillingInformationError { + fn source(&self) -> ::std::option::Option<&(dyn ::std::error::Error + 'static)> { + match self { + Self::AccessDeniedException(_inner) => ::std::option::Option::Some(_inner), + Self::InternalServerException(_inner) => ::std::option::Option::Some(_inner), + Self::NotFoundException(_inner) => ::std::option::Option::Some(_inner), + Self::Unhandled(_inner) => ::std::option::Option::Some(&*_inner.source), + } + } +} +impl ::std::fmt::Display for GetOutpostBillingInformationError { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + match self { + Self::AccessDeniedException(_inner) => _inner.fmt(f), + Self::InternalServerException(_inner) => _inner.fmt(f), + Self::NotFoundException(_inner) => _inner.fmt(f), + Self::Unhandled(_inner) => { + if let ::std::option::Option::Some(code) = ::aws_smithy_types::error::metadata::ProvideErrorMetadata::code(self) { + write!(f, "unhandled error ({code})") + } else { + f.write_str("unhandled error") + } + } + } + } +} +impl ::aws_smithy_types::retry::ProvideErrorKind for GetOutpostBillingInformationError { + fn code(&self) -> ::std::option::Option<&str> { + ::aws_smithy_types::error::metadata::ProvideErrorMetadata::code(self) + } + fn retryable_error_kind(&self) -> ::std::option::Option<::aws_smithy_types::retry::ErrorKind> { + ::std::option::Option::None + } +} +impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for GetOutpostBillingInformationError { + fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { + match self { + Self::AccessDeniedException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::InternalServerException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::NotFoundException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), + Self::Unhandled(_inner) => &_inner.meta, + } + } +} +impl ::aws_smithy_runtime_api::client::result::CreateUnhandledError for GetOutpostBillingInformationError { + fn create_unhandled_error( + source: ::std::boxed::Box, + meta: ::std::option::Option<::aws_smithy_types::error::ErrorMetadata>, + ) -> Self { + Self::Unhandled(crate::error::sealed_unhandled::Unhandled { + source, + meta: meta.unwrap_or_default(), + }) + } +} +impl ::aws_types::request_id::RequestId for crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError { + fn request_id(&self) -> Option<&str> { + self.meta().request_id() + } +} + +pub use crate::operation::get_outpost_billing_information::_get_outpost_billing_information_output::GetOutpostBillingInformationOutput; + +pub use crate::operation::get_outpost_billing_information::_get_outpost_billing_information_input::GetOutpostBillingInformationInput; + +mod _get_outpost_billing_information_input; + +mod _get_outpost_billing_information_output; + +/// Builders +pub mod builders; + +/// Paginator for this operation +pub mod paginator; diff --git a/sdk/outposts/src/operation/get_outpost_billing_information/_get_outpost_billing_information_input.rs b/sdk/outposts/src/operation/get_outpost_billing_information/_get_outpost_billing_information_input.rs new file mode 100644 index 000000000000..b29a8978dee3 --- /dev/null +++ b/sdk/outposts/src/operation/get_outpost_billing_information/_get_outpost_billing_information_input.rs @@ -0,0 +1,99 @@ +// Code generated 
by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +#[allow(missing_docs)] // documentation missing in model +#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub struct GetOutpostBillingInformationInput { + ///

<p>The pagination token.</p>
+ pub next_token: ::std::option::Option<::std::string::String>, + ///

<p>The maximum page size.</p>
+ pub max_results: ::std::option::Option<i32>, + ///

<p>The ID or ARN of the Outpost.</p>
+ pub outpost_identifier: ::std::option::Option<::std::string::String>, +} +impl GetOutpostBillingInformationInput { + ///

<p>The pagination token.</p>
+ pub fn next_token(&self) -> ::std::option::Option<&str> { + self.next_token.as_deref() + } + ///

<p>The maximum page size.</p>
+ pub fn max_results(&self) -> ::std::option::Option<i32> { + self.max_results + } + ///

<p>The ID or ARN of the Outpost.</p>
+ pub fn outpost_identifier(&self) -> ::std::option::Option<&str> { + self.outpost_identifier.as_deref() + } +} +impl GetOutpostBillingInformationInput { + /// Creates a new builder-style object to manufacture [`GetOutpostBillingInformationInput`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput). + pub fn builder() -> crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationInputBuilder { + crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationInputBuilder::default() + } +} + +/// A builder for [`GetOutpostBillingInformationInput`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput). +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct GetOutpostBillingInformationInputBuilder { + pub(crate) next_token: ::std::option::Option<::std::string::String>, + pub(crate) max_results: ::std::option::Option, + pub(crate) outpost_identifier: ::std::option::Option<::std::string::String>, +} +impl GetOutpostBillingInformationInputBuilder { + ///

<p>The pagination token.</p>
+ pub fn next_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.next_token = ::std::option::Option::Some(input.into()); + self + } + ///

<p>The pagination token.</p>
+ pub fn set_next_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.next_token = input; + self + } + ///

<p>The pagination token.</p>
+ pub fn get_next_token(&self) -> &::std::option::Option<::std::string::String> { + &self.next_token + } + ///

<p>The maximum page size.</p>
+ pub fn max_results(mut self, input: i32) -> Self { + self.max_results = ::std::option::Option::Some(input); + self + } + ///

<p>The maximum page size.</p>
+ pub fn set_max_results(mut self, input: ::std::option::Option<i32>) -> Self { + self.max_results = input; + self + } + ///

<p>The maximum page size.</p>
+ pub fn get_max_results(&self) -> &::std::option::Option<i32> { + &self.max_results + } + ///

<p>The ID or ARN of the Outpost.</p>
+ /// This field is required. + pub fn outpost_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.outpost_identifier = ::std::option::Option::Some(input.into()); + self + } + ///

<p>The ID or ARN of the Outpost.</p>
+ pub fn set_outpost_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.outpost_identifier = input; + self + } + ///

<p>The ID or ARN of the Outpost.</p>
+ pub fn get_outpost_identifier(&self) -> &::std::option::Option<::std::string::String> { + &self.outpost_identifier + } + /// Consumes the builder and constructs a [`GetOutpostBillingInformationInput`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput). + pub fn build( + self, + ) -> ::std::result::Result< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput, + ::aws_smithy_types::error::operation::BuildError, + > { + ::std::result::Result::Ok(crate::operation::get_outpost_billing_information::GetOutpostBillingInformationInput { + next_token: self.next_token, + max_results: self.max_results, + outpost_identifier: self.outpost_identifier, + }) + } +} diff --git a/sdk/outposts/src/operation/get_outpost_billing_information/_get_outpost_billing_information_output.rs b/sdk/outposts/src/operation/get_outpost_billing_information/_get_outpost_billing_information_output.rs new file mode 100644 index 000000000000..386f10304426 --- /dev/null +++ b/sdk/outposts/src/operation/get_outpost_billing_information/_get_outpost_billing_information_output.rs @@ -0,0 +1,118 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +#[allow(missing_docs)] // documentation missing in model +#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub struct GetOutpostBillingInformationOutput { + ///

<p>The pagination token.</p>
+ pub next_token: ::std::option::Option<::std::string::String>, + ///

<p>The subscription details for the specified Outpost.</p>
+ pub subscriptions: ::std::option::Option<::std::vec::Vec<crate::types::Subscription>>, + ///

<p>The date the current contract term ends for the specified Outpost. You must start the renewal or decommission process at least 5 business days before the current term for your Amazon Web Services Outposts ends. Failing to complete these steps at least 5 business days before the current term ends might result in unanticipated charges.</p>
+ pub contract_end_date: ::std::option::Option<::std::string::String>, + _request_id: Option<String>, +} +impl GetOutpostBillingInformationOutput { + ///

<p>The pagination token.</p>
+ pub fn next_token(&self) -> ::std::option::Option<&str> { + self.next_token.as_deref() + } + ///

<p>The subscription details for the specified Outpost.</p>
+ /// + /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.subscriptions.is_none()`. + pub fn subscriptions(&self) -> &[crate::types::Subscription] { + self.subscriptions.as_deref().unwrap_or_default() + } + ///

<p>The date the current contract term ends for the specified Outpost. You must start the renewal or decommission process at least 5 business days before the current term for your Amazon Web Services Outposts ends. Failing to complete these steps at least 5 business days before the current term ends might result in unanticipated charges.</p>
+ pub fn contract_end_date(&self) -> ::std::option::Option<&str> { + self.contract_end_date.as_deref() + } +} +impl ::aws_types::request_id::RequestId for GetOutpostBillingInformationOutput { + fn request_id(&self) -> Option<&str> { + self._request_id.as_deref() + } +} +impl GetOutpostBillingInformationOutput { + /// Creates a new builder-style object to manufacture [`GetOutpostBillingInformationOutput`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput). + pub fn builder() -> crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationOutputBuilder { + crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationOutputBuilder::default() + } +} + +/// A builder for [`GetOutpostBillingInformationOutput`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput). +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct GetOutpostBillingInformationOutputBuilder { + pub(crate) next_token: ::std::option::Option<::std::string::String>, + pub(crate) subscriptions: ::std::option::Option<::std::vec::Vec>, + pub(crate) contract_end_date: ::std::option::Option<::std::string::String>, + _request_id: Option, +} +impl GetOutpostBillingInformationOutputBuilder { + ///

<p>The pagination token.</p>
+ pub fn next_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.next_token = ::std::option::Option::Some(input.into()); + self + } + ///

<p>The pagination token.</p>
+ pub fn set_next_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.next_token = input; + self + } + ///

<p>The pagination token.</p>
+ pub fn get_next_token(&self) -> &::std::option::Option<::std::string::String> { + &self.next_token + } + /// Appends an item to `subscriptions`. + /// + /// To override the contents of this collection use [`set_subscriptions`](Self::set_subscriptions). + /// + ///

<p>The subscription details for the specified Outpost.</p>
+ pub fn subscriptions(mut self, input: crate::types::Subscription) -> Self { + let mut v = self.subscriptions.unwrap_or_default(); + v.push(input); + self.subscriptions = ::std::option::Option::Some(v); + self + } + ///

<p>The subscription details for the specified Outpost.</p>
+ pub fn set_subscriptions(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Subscription>>) -> Self { + self.subscriptions = input; + self + } + ///

<p>The subscription details for the specified Outpost.</p>
+ pub fn get_subscriptions(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Subscription>> { + &self.subscriptions + } + ///

<p>The date the current contract term ends for the specified Outpost. You must start the renewal or decommission process at least 5 business days before the current term for your Amazon Web Services Outposts ends. Failing to complete these steps at least 5 business days before the current term ends might result in unanticipated charges.</p>
+ pub fn contract_end_date(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.contract_end_date = ::std::option::Option::Some(input.into()); + self + } + ///

<p>The date the current contract term ends for the specified Outpost. You must start the renewal or decommission process at least 5 business days before the current term for your Amazon Web Services Outposts ends. Failing to complete these steps at least 5 business days before the current term ends might result in unanticipated charges.</p>
+ pub fn set_contract_end_date(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.contract_end_date = input; + self + } + ///

<p>The date the current contract term ends for the specified Outpost. You must start the renewal or decommission process at least 5 business days before the current term for your Amazon Web Services Outposts ends. Failing to complete these steps at least 5 business days before the current term ends might result in unanticipated charges.</p>
+ pub fn get_contract_end_date(&self) -> &::std::option::Option<::std::string::String> { + &self.contract_end_date + } + pub(crate) fn _request_id(mut self, request_id: impl Into) -> Self { + self._request_id = Some(request_id.into()); + self + } + + pub(crate) fn _set_request_id(&mut self, request_id: Option) -> &mut Self { + self._request_id = request_id; + self + } + /// Consumes the builder and constructs a [`GetOutpostBillingInformationOutput`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput). + pub fn build(self) -> crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput { + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput { + next_token: self.next_token, + subscriptions: self.subscriptions, + contract_end_date: self.contract_end_date, + _request_id: self._request_id, + } + } +} diff --git a/sdk/outposts/src/operation/get_outpost_billing_information/builders.rs b/sdk/outposts/src/operation/get_outpost_billing_information/builders.rs new file mode 100644 index 000000000000..bbec175d17f8 --- /dev/null +++ b/sdk/outposts/src/operation/get_outpost_billing_information/builders.rs @@ -0,0 +1,159 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub use crate::operation::get_outpost_billing_information::_get_outpost_billing_information_output::GetOutpostBillingInformationOutputBuilder; + +pub use crate::operation::get_outpost_billing_information::_get_outpost_billing_information_input::GetOutpostBillingInformationInputBuilder; + +impl crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationInputBuilder { + /// Sends a request with this input using the given client. + pub async fn send_with( + self, + client: &crate::Client, + ) -> ::std::result::Result< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let mut fluent_builder = client.get_outpost_billing_information(); + fluent_builder.inner = self; + fluent_builder.send().await + } +} +/// Fluent builder constructing a request to `GetOutpostBillingInformation`. +/// +///

<p>Gets current and historical billing information about the specified Outpost.</p>
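A minimal usage sketch for the fluent builder defined just below; it is not part of the generated code, it assumes an already-configured `aws_sdk_outposts::Client`, and the Outpost ID is a placeholder:

```rust
use aws_sdk_outposts::Client;

/// Fetches billing information for a single Outpost and prints a short summary.
/// The Outpost ID is a placeholder; substitute a real ID or ARN.
async fn show_billing(client: &Client) {
    let resp = client
        .get_outpost_billing_information()
        .outpost_identifier("op-0123456789abcdef0")
        .send()
        .await;

    match resp {
        Ok(output) => {
            println!("contract ends: {:?}", output.contract_end_date());
            println!("subscriptions on this page: {}", output.subscriptions().len());
        }
        Err(sdk_err) => {
            // Convert the transport-level SdkError into the modeled service error so the
            // generated `is_*` helpers can be used to branch on the failure kind.
            let service_err = sdk_err.into_service_error();
            if service_err.is_access_denied_exception() {
                eprintln!("access denied: {service_err}");
            } else if service_err.is_not_found_exception() {
                eprintln!("outpost not found: {service_err}");
            } else {
                eprintln!("request failed: {service_err}");
            }
        }
    }
}
```

The same input can also be assembled separately through `GetOutpostBillingInformationInputBuilder` and dispatched with `send_with(&client)`, as shown earlier in this builders module.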
+#[derive(::std::clone::Clone, ::std::fmt::Debug)] +pub struct GetOutpostBillingInformationFluentBuilder { + handle: ::std::sync::Arc, + inner: crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationInputBuilder, + config_override: ::std::option::Option, +} +impl + crate::client::customize::internal::CustomizableSend< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + > for GetOutpostBillingInformationFluentBuilder +{ + fn send( + self, + config_override: crate::config::Builder, + ) -> crate::client::customize::internal::BoxFuture< + crate::client::customize::internal::SendResult< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + >, + > { + ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await }) + } +} +impl GetOutpostBillingInformationFluentBuilder { + /// Creates a new `GetOutpostBillingInformationFluentBuilder`. + pub(crate) fn new(handle: ::std::sync::Arc) -> Self { + Self { + handle, + inner: ::std::default::Default::default(), + config_override: ::std::option::Option::None, + } + } + /// Access the GetOutpostBillingInformation as a reference. + pub fn as_input(&self) -> &crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationInputBuilder { + &self.inner + } + /// Sends the request and returns the response. + /// + /// If an error occurs, an `SdkError` will be returned with additional details that + /// can be matched against. + /// + /// By default, any retryable failures will be retried twice. Retry behavior + /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be + /// set when configuring the client. + pub async fn send( + self, + ) -> ::std::result::Result< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + > { + let input = self + .inner + .build() + .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?; + let runtime_plugins = crate::operation::get_outpost_billing_information::GetOutpostBillingInformation::operation_runtime_plugins( + self.handle.runtime_plugins.clone(), + &self.handle.conf, + self.config_override, + ); + crate::operation::get_outpost_billing_information::GetOutpostBillingInformation::orchestrate(&runtime_plugins, input).await + } + + /// Consumes this builder, creating a customizable operation that can be modified before being sent. 
+ pub fn customize( + self, + ) -> crate::client::customize::CustomizableOperation< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + Self, + > { + crate::client::customize::CustomizableOperation::new(self) + } + pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into) -> Self { + self.set_config_override(::std::option::Option::Some(config_override.into())); + self + } + + pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option) -> &mut Self { + self.config_override = config_override; + self + } + /// Create a paginator for this request + /// + /// Paginators are used by calling [`send().await`](crate::operation::get_outpost_billing_information::paginator::GetOutpostBillingInformationPaginator::send) which returns a [`PaginationStream`](aws_smithy_async::future::pagination_stream::PaginationStream). + pub fn into_paginator(self) -> crate::operation::get_outpost_billing_information::paginator::GetOutpostBillingInformationPaginator { + crate::operation::get_outpost_billing_information::paginator::GetOutpostBillingInformationPaginator::new(self.handle, self.inner) + } + ///

<p>The pagination token.</p>
+ pub fn next_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.inner = self.inner.next_token(input.into()); + self + } + ///

<p>The pagination token.</p>
+ pub fn set_next_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.inner = self.inner.set_next_token(input); + self + } + ///

<p>The pagination token.</p>
+ pub fn get_next_token(&self) -> &::std::option::Option<::std::string::String> { + self.inner.get_next_token() + } + ///

<p>The maximum page size.</p>
+ pub fn max_results(mut self, input: i32) -> Self { + self.inner = self.inner.max_results(input); + self + } + ///

<p>The maximum page size.</p>
+ pub fn set_max_results(mut self, input: ::std::option::Option<i32>) -> Self { + self.inner = self.inner.set_max_results(input); + self + } + ///

<p>The maximum page size.</p>
+ pub fn get_max_results(&self) -> &::std::option::Option<i32> { + self.inner.get_max_results() + } + ///

<p>The ID or ARN of the Outpost.</p>
+ pub fn outpost_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.inner = self.inner.outpost_identifier(input.into()); + self + } + ///

<p>The ID or ARN of the Outpost.</p>
+ pub fn set_outpost_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.inner = self.inner.set_outpost_identifier(input); + self + } + ///

<p>The ID or ARN of the Outpost.</p>
+ pub fn get_outpost_identifier(&self) -> &::std::option::Option<::std::string::String> { + self.inner.get_outpost_identifier() + } +} diff --git a/sdk/outposts/src/operation/get_outpost_billing_information/paginator.rs b/sdk/outposts/src/operation/get_outpost_billing_information/paginator.rs new file mode 100644 index 000000000000..654a2f06e211 --- /dev/null +++ b/sdk/outposts/src/operation/get_outpost_billing_information/paginator.rs @@ -0,0 +1,152 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +/// Paginator for [`GetOutpostBillingInformation`](crate::operation::get_outpost_billing_information::GetOutpostBillingInformation) +pub struct GetOutpostBillingInformationPaginator { + handle: std::sync::Arc, + builder: crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationInputBuilder, + stop_on_duplicate_token: bool, +} + +impl GetOutpostBillingInformationPaginator { + /// Create a new paginator-wrapper + pub(crate) fn new( + handle: std::sync::Arc, + builder: crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationInputBuilder, + ) -> Self { + Self { + handle, + builder, + stop_on_duplicate_token: true, + } + } + + /// Set the page size + /// + /// _Note: this method will override any previously set value for `max_results`_ + pub fn page_size(mut self, limit: i32) -> Self { + self.builder.max_results = ::std::option::Option::Some(limit); + self + } + + /// Create a flattened paginator + /// + /// This paginator automatically flattens results using `subscriptions`. Queries to the underlying service + /// are dispatched lazily. + pub fn items(self) -> crate::operation::get_outpost_billing_information::paginator::GetOutpostBillingInformationPaginatorItems { + crate::operation::get_outpost_billing_information::paginator::GetOutpostBillingInformationPaginatorItems(self) + } + + /// Stop paginating when the service returns the same pagination token twice in a row. + /// + /// Defaults to true. + /// + /// For certain operations, it may be useful to continue on duplicate token. For example, + /// if an operation is for tailing a log file in real-time, then continuing may be desired. + /// This option can be set to `false` to accommodate these use cases. + pub fn stop_on_duplicate_token(mut self, stop_on_duplicate_token: bool) -> Self { + self.stop_on_duplicate_token = stop_on_duplicate_token; + self + } + + /// Create the pagination stream + /// + /// _Note:_ No requests will be dispatched until the stream is used + /// (e.g. with the [`.next().await`](aws_smithy_async::future::pagination_stream::PaginationStream::next) method). 
+ pub fn send( + self, + ) -> ::aws_smithy_async::future::pagination_stream::PaginationStream< + ::std::result::Result< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + >, + > { + // Move individual fields out of self for the borrow checker + let builder = self.builder; + let handle = self.handle; + let runtime_plugins = crate::operation::get_outpost_billing_information::GetOutpostBillingInformation::operation_runtime_plugins( + handle.runtime_plugins.clone(), + &handle.conf, + ::std::option::Option::None, + ) + .with_operation_plugin(crate::sdk_feature_tracker::paginator::PaginatorFeatureTrackerRuntimePlugin::new()); + ::aws_smithy_async::future::pagination_stream::PaginationStream::new(::aws_smithy_async::future::pagination_stream::fn_stream::FnStream::new( + move |tx| { + ::std::boxed::Box::pin(async move { + // Build the input for the first time. If required fields are missing, this is where we'll produce an early error. + let mut input = match builder + .build() + .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure) + { + ::std::result::Result::Ok(input) => input, + ::std::result::Result::Err(e) => { + let _ = tx.send(::std::result::Result::Err(e)).await; + return; + } + }; + loop { + let resp = crate::operation::get_outpost_billing_information::GetOutpostBillingInformation::orchestrate( + &runtime_plugins, + input.clone(), + ) + .await; + // If the input member is None or it was an error + let done = match resp { + ::std::result::Result::Ok(ref resp) => { + let new_token = crate::lens::reflens_get_outpost_billing_information_output_output_next_token(resp); + // Pagination is exhausted when the next token is an empty string + let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true); + if !is_empty && new_token == input.next_token.as_ref() && self.stop_on_duplicate_token { + true + } else { + input.next_token = new_token.cloned(); + is_empty + } + } + ::std::result::Result::Err(_) => true, + }; + if tx.send(resp).await.is_err() { + // receiving end was dropped + return; + } + if done { + return; + } + } + }) + }, + )) + } +} + +/// Flattened paginator for `GetOutpostBillingInformationPaginator` +/// +/// This is created with [`.items()`](GetOutpostBillingInformationPaginator::items) +pub struct GetOutpostBillingInformationPaginatorItems(GetOutpostBillingInformationPaginator); + +impl GetOutpostBillingInformationPaginatorItems { + /// Create the pagination stream + /// + /// _Note_: No requests will be dispatched until the stream is used + /// (e.g. with the [`.next().await`](aws_smithy_async::future::pagination_stream::PaginationStream::next) method). + /// + /// To read the entirety of the paginator, use [`.collect::, _>()`](aws_smithy_async::future::pagination_stream::PaginationStream::collect). 
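Since the operation pages over `subscriptions`, callers usually drain it through the flattened paginator instead of threading `next_token` by hand. A sketch under the same assumptions as above (placeholder Outpost ID, arbitrary page size):

```rust
use aws_sdk_outposts::{types::Subscription, Client};

/// Collects every subscription across all pages of GetOutpostBillingInformation.
async fn all_subscriptions(
    client: &Client,
) -> Result<Vec<Subscription>, Box<dyn std::error::Error + Send + Sync>> {
    let subscriptions = client
        .get_outpost_billing_information()
        .outpost_identifier("op-0123456789abcdef0") // placeholder
        .into_paginator()
        .page_size(20) // forwarded to MaxResults on each request
        .items() // flattens pages into individual Subscription values
        .send()
        .collect::<Result<Vec<_>, _>>()
        .await?;
    Ok(subscriptions)
}
```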
+ pub fn send( + self, + ) -> ::aws_smithy_async::future::pagination_stream::PaginationStream< + ::std::result::Result< + crate::types::Subscription, + ::aws_smithy_runtime_api::client::result::SdkError< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, + >, + >, + > { + ::aws_smithy_async::future::pagination_stream::TryFlatMap::new(self.0.send()).flat_map(|page| { + crate::lens::lens_get_outpost_billing_information_output_output_subscriptions(page) + .unwrap_or_default() + .into_iter() + }) + } +} diff --git a/sdk/outposts/src/operation/list_catalog_items.rs b/sdk/outposts/src/operation/list_catalog_items.rs index 94c7020d4a90..51cfbdcaa4e7 100644 --- a/sdk/outposts/src/operation/list_catalog_items.rs +++ b/sdk/outposts/src/operation/list_catalog_items.rs @@ -289,6 +289,8 @@ impl ::aws_smithy_runtime_api::client::interceptors::Intercept for ListCatalogIt #[non_exhaustive] #[derive(::std::fmt::Debug)] pub enum ListCatalogItemsError { + ///

<p>You do not have permission to perform this operation.</p>
+ AccessDeniedException(crate::types::error::AccessDeniedException), ///

<p>An internal error has occurred.</p>
InternalServerException(crate::types::error::InternalServerException), ///

<p>The specified request is not valid.</p>
@@ -328,12 +330,17 @@ impl ListCatalogItemsError { /// pub fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { match self { + Self::AccessDeniedException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::InternalServerException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::NotFoundException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::ValidationException(e) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(e), Self::Unhandled(e) => &e.meta, } } + /// Returns `true` if the error kind is `ListCatalogItemsError::AccessDeniedException`. + pub fn is_access_denied_exception(&self) -> bool { + matches!(self, Self::AccessDeniedException(_)) + } /// Returns `true` if the error kind is `ListCatalogItemsError::InternalServerException`. pub fn is_internal_server_exception(&self) -> bool { matches!(self, Self::InternalServerException(_)) @@ -350,6 +357,7 @@ impl ListCatalogItemsError { impl ::std::error::Error for ListCatalogItemsError { fn source(&self) -> ::std::option::Option<&(dyn ::std::error::Error + 'static)> { match self { + Self::AccessDeniedException(_inner) => ::std::option::Option::Some(_inner), Self::InternalServerException(_inner) => ::std::option::Option::Some(_inner), Self::NotFoundException(_inner) => ::std::option::Option::Some(_inner), Self::ValidationException(_inner) => ::std::option::Option::Some(_inner), @@ -360,6 +368,7 @@ impl ::std::error::Error for ListCatalogItemsError { impl ::std::fmt::Display for ListCatalogItemsError { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match self { + Self::AccessDeniedException(_inner) => _inner.fmt(f), Self::InternalServerException(_inner) => _inner.fmt(f), Self::NotFoundException(_inner) => _inner.fmt(f), Self::ValidationException(_inner) => _inner.fmt(f), @@ -384,6 +393,7 @@ impl ::aws_smithy_types::retry::ProvideErrorKind for ListCatalogItemsError { impl ::aws_smithy_types::error::metadata::ProvideErrorMetadata for ListCatalogItemsError { fn meta(&self) -> &::aws_smithy_types::error::ErrorMetadata { match self { + Self::AccessDeniedException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::InternalServerException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::NotFoundException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), Self::ValidationException(_inner) => ::aws_smithy_types::error::metadata::ProvideErrorMetadata::meta(_inner), diff --git a/sdk/outposts/src/protocol_serde.rs b/sdk/outposts/src/protocol_serde.rs index be0e0e15a541..ff7efde50777 100644 --- a/sdk/outposts/src/protocol_serde.rs +++ b/sdk/outposts/src/protocol_serde.rs @@ -47,6 +47,8 @@ pub(crate) mod shape_get_order; pub(crate) mod shape_get_outpost; +pub(crate) mod shape_get_outpost_billing_information; + pub(crate) mod shape_get_outpost_instance_types; pub(crate) mod shape_get_outpost_supported_instance_types; @@ -171,6 +173,8 @@ pub(crate) mod shape_site; pub(crate) mod shape_site_list_definition; +pub(crate) mod shape_subscription_list; + pub(crate) mod shape_tag_map; pub(crate) mod shape_account_id_list; @@ -197,6 +201,8 @@ pub(crate) mod shape_line_item_list_definition; pub(crate) mod shape_order_summary; +pub(crate) mod shape_subscription; + pub(crate) mod shape_supported_storage_list; pub(crate) mod shape_supported_uplink_gbps_list_definition; @@ -211,6 +217,8 @@ pub(crate) mod shape_line_item; 
pub(crate) mod shape_line_item_status_counts; +pub(crate) mod shape_order_id_list; + pub(crate) mod shape_asset_instance_capacity_list; pub(crate) mod shape_instance_families; diff --git a/sdk/outposts/src/protocol_serde/shape_get_catalog_item.rs b/sdk/outposts/src/protocol_serde/shape_get_catalog_item.rs index f3eb0fd39d47..f2892975be33 100644 --- a/sdk/outposts/src/protocol_serde/shape_get_catalog_item.rs +++ b/sdk/outposts/src/protocol_serde/shape_get_catalog_item.rs @@ -17,6 +17,21 @@ pub fn de_get_catalog_item_http_error( let _error_message = generic.message().map(|msg| msg.to_owned()); Err(match error_code { + "AccessDeniedException" => crate::operation::get_catalog_item::GetCatalogItemError::AccessDeniedException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::AccessDeniedExceptionBuilder::default(); + output = crate::protocol_serde::shape_access_denied_exception::de_access_denied_exception_json_err(_response_body, output) + .map_err(crate::operation::get_catalog_item::GetCatalogItemError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), "InternalServerException" => crate::operation::get_catalog_item::GetCatalogItemError::InternalServerException({ #[allow(unused_mut)] let mut tmp = { diff --git a/sdk/outposts/src/protocol_serde/shape_get_outpost_billing_information.rs b/sdk/outposts/src/protocol_serde/shape_get_outpost_billing_information.rs new file mode 100644 index 000000000000..9d65afd23441 --- /dev/null +++ b/sdk/outposts/src/protocol_serde/shape_get_outpost_billing_information.rs @@ -0,0 +1,138 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. 
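For orientation, the success deserializer in this new module walks a JSON body with roughly the following keys; the snippet uses `serde_json` (not a dependency of the generated crate) and placeholder values purely to illustrate the shape:

```rust
use serde_json::json;

fn main() {
    // Illustrative only: the field names recognized by
    // de_get_outpost_billing_information; every value below is a placeholder.
    let body = json!({
        "ContractEndDate": "2025-12-31",
        "NextToken": "opaque-pagination-token",
        "Subscriptions": [{
            "SubscriptionId": "subscription-id",
            "SubscriptionType": "ORIGINAL",
            "SubscriptionStatus": "ACTIVE",
            "OrderIds": ["order-id"],
            "BeginDate": 1700000000,   // epoch seconds
            "EndDate": 1800000000,     // epoch seconds
            "MonthlyRecurringPrice": 1234.56,
            "UpfrontPrice": 0.0
        }]
    });
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}
```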
+#[allow(clippy::unnecessary_wraps)] +pub fn de_get_outpost_billing_information_http_error( + _response_status: u16, + _response_headers: &::aws_smithy_runtime_api::http::Headers, + _response_body: &[u8], +) -> std::result::Result< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, +> { + #[allow(unused_mut)] + let mut generic_builder = crate::protocol_serde::parse_http_error_metadata(_response_status, _response_headers, _response_body) + .map_err(crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::unhandled)?; + generic_builder = ::aws_types::request_id::apply_request_id(generic_builder, _response_headers); + let generic = generic_builder.build(); + let error_code = match generic.code() { + Some(code) => code, + None => return Err(crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::unhandled(generic)), + }; + + let _error_message = generic.message().map(|msg| msg.to_owned()); + Err(match error_code { + "AccessDeniedException" => crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::AccessDeniedException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::AccessDeniedExceptionBuilder::default(); + output = crate::protocol_serde::shape_access_denied_exception::de_access_denied_exception_json_err(_response_body, output) + .map_err(crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + "InternalServerException" => crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::InternalServerException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::InternalServerExceptionBuilder::default(); + output = crate::protocol_serde::shape_internal_server_exception::de_internal_server_exception_json_err(_response_body, output) + .map_err(crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + "NotFoundException" => crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::NotFoundException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::NotFoundExceptionBuilder::default(); + output = crate::protocol_serde::shape_not_found_exception::de_not_found_exception_json_err(_response_body, output) + .map_err(crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), + _ => crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::generic(generic), + }) +} + +#[allow(clippy::unnecessary_wraps)] +pub fn de_get_outpost_billing_information_http_response( + _response_status: u16, + _response_headers: &::aws_smithy_runtime_api::http::Headers, + _response_body: &[u8], +) -> std::result::Result< + crate::operation::get_outpost_billing_information::GetOutpostBillingInformationOutput, + 
crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError, +> { + Ok({ + #[allow(unused_mut)] + let mut output = crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationOutputBuilder::default(); + output = crate::protocol_serde::shape_get_outpost_billing_information::de_get_outpost_billing_information(_response_body, output) + .map_err(crate::operation::get_outpost_billing_information::GetOutpostBillingInformationError::unhandled)?; + output._set_request_id(::aws_types::request_id::RequestId::request_id(_response_headers).map(str::to_string)); + output.build() + }) +} + +pub(crate) fn de_get_outpost_billing_information( + value: &[u8], + mut builder: crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationOutputBuilder, +) -> ::std::result::Result< + crate::operation::get_outpost_billing_information::builders::GetOutpostBillingInformationOutputBuilder, + ::aws_smithy_json::deserialize::error::DeserializeError, +> { + let mut tokens_owned = ::aws_smithy_json::deserialize::json_token_iter(crate::protocol_serde::or_empty_doc(value)).peekable(); + let tokens = &mut tokens_owned; + ::aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; + loop { + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::EndObject { .. }) => break, + Some(::aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => match key.to_unescaped()?.as_ref() { + "ContractEndDate" => { + builder = builder.set_contract_end_date( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? + .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?, + ); + } + "NextToken" => { + builder = builder.set_next_token( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
+ .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?, + ); + } + "Subscriptions" => { + builder = builder.set_subscriptions(crate::protocol_serde::shape_subscription_list::de_subscription_list(tokens)?); + } + _ => ::aws_smithy_json::deserialize::token::skip_value(tokens)?, + }, + other => { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom(format!( + "expected object key or end object, found: {:?}", + other + ))) + } + } + } + if tokens.next().is_some() { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "found more JSON tokens after completing parsing", + )); + } + Ok(builder) +} diff --git a/sdk/outposts/src/protocol_serde/shape_list_catalog_items.rs b/sdk/outposts/src/protocol_serde/shape_list_catalog_items.rs index 6b3c5e96706b..1a3d7f9b1bde 100644 --- a/sdk/outposts/src/protocol_serde/shape_list_catalog_items.rs +++ b/sdk/outposts/src/protocol_serde/shape_list_catalog_items.rs @@ -17,6 +17,21 @@ pub fn de_list_catalog_items_http_error( let _error_message = generic.message().map(|msg| msg.to_owned()); Err(match error_code { + "AccessDeniedException" => crate::operation::list_catalog_items::ListCatalogItemsError::AccessDeniedException({ + #[allow(unused_mut)] + let mut tmp = { + #[allow(unused_mut)] + let mut output = crate::types::error::builders::AccessDeniedExceptionBuilder::default(); + output = crate::protocol_serde::shape_access_denied_exception::de_access_denied_exception_json_err(_response_body, output) + .map_err(crate::operation::list_catalog_items::ListCatalogItemsError::unhandled)?; + let output = output.meta(generic); + output.build() + }; + if tmp.message.is_none() { + tmp.message = _error_message; + } + tmp + }), "InternalServerException" => crate::operation::list_catalog_items::ListCatalogItemsError::InternalServerException({ #[allow(unused_mut)] let mut tmp = { diff --git a/sdk/outposts/src/protocol_serde/shape_order_id_list.rs b/sdk/outposts/src/protocol_serde/shape_order_id_list.rs new file mode 100644 index 000000000000..14100773d410 --- /dev/null +++ b/sdk/outposts/src/protocol_serde/shape_order_id_list.rs @@ -0,0 +1,34 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub(crate) fn de_order_id_list<'a, I>( + tokens: &mut ::std::iter::Peekable, +) -> ::std::result::Result>, ::aws_smithy_json::deserialize::error::DeserializeError> +where + I: Iterator, ::aws_smithy_json::deserialize::error::DeserializeError>>, +{ + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), + Some(::aws_smithy_json::deserialize::Token::StartArray { .. }) => { + let mut items = Vec::new(); + loop { + match tokens.peek() { + Some(Ok(::aws_smithy_json::deserialize::Token::EndArray { .. })) => { + tokens.next().transpose().unwrap(); + break; + } + _ => { + let value = ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
+ .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?; + if let Some(value) = value { + items.push(value); + } + } + } + } + Ok(Some(items)) + } + _ => Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "expected start array or null", + )), + } +} diff --git a/sdk/outposts/src/protocol_serde/shape_subscription.rs b/sdk/outposts/src/protocol_serde/shape_subscription.rs new file mode 100644 index 000000000000..bf2022adb7e9 --- /dev/null +++ b/sdk/outposts/src/protocol_serde/shape_subscription.rs @@ -0,0 +1,79 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub(crate) fn de_subscription<'a, I>( + tokens: &mut ::std::iter::Peekable, +) -> ::std::result::Result, ::aws_smithy_json::deserialize::error::DeserializeError> +where + I: Iterator, ::aws_smithy_json::deserialize::error::DeserializeError>>, +{ + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), + Some(::aws_smithy_json::deserialize::Token::StartObject { .. }) => { + #[allow(unused_mut)] + let mut builder = crate::types::builders::SubscriptionBuilder::default(); + loop { + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::EndObject { .. }) => break, + Some(::aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => match key.to_unescaped()?.as_ref() { + "SubscriptionId" => { + builder = builder.set_subscription_id( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? + .map(|s| s.to_unescaped().map(|u| u.into_owned())) + .transpose()?, + ); + } + "SubscriptionType" => { + builder = builder.set_subscription_type( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? + .map(|s| s.to_unescaped().map(|u| crate::types::SubscriptionType::from(u.as_ref()))) + .transpose()?, + ); + } + "SubscriptionStatus" => { + builder = builder.set_subscription_status( + ::aws_smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
+ .map(|s| s.to_unescaped().map(|u| crate::types::SubscriptionStatus::from(u.as_ref()))) + .transpose()?, + ); + } + "OrderIds" => { + builder = builder.set_order_ids(crate::protocol_serde::shape_order_id_list::de_order_id_list(tokens)?); + } + "BeginDate" => { + builder = builder.set_begin_date(::aws_smithy_json::deserialize::token::expect_timestamp_or_null( + tokens.next(), + ::aws_smithy_types::date_time::Format::EpochSeconds, + )?); + } + "EndDate" => { + builder = builder.set_end_date(::aws_smithy_json::deserialize::token::expect_timestamp_or_null( + tokens.next(), + ::aws_smithy_types::date_time::Format::EpochSeconds, + )?); + } + "MonthlyRecurringPrice" => { + builder = builder.set_monthly_recurring_price( + ::aws_smithy_json::deserialize::token::expect_number_or_null(tokens.next())?.map(|v| v.to_f64_lossy()), + ); + } + "UpfrontPrice" => { + builder = builder.set_upfront_price( + ::aws_smithy_json::deserialize::token::expect_number_or_null(tokens.next())?.map(|v| v.to_f64_lossy()), + ); + } + _ => ::aws_smithy_json::deserialize::token::skip_value(tokens)?, + }, + other => { + return Err(::aws_smithy_json::deserialize::error::DeserializeError::custom(format!( + "expected object key or end object, found: {:?}", + other + ))) + } + } + } + Ok(Some(builder.build())) + } + _ => Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "expected start object or null", + )), + } +} diff --git a/sdk/outposts/src/protocol_serde/shape_subscription_list.rs b/sdk/outposts/src/protocol_serde/shape_subscription_list.rs new file mode 100644 index 000000000000..0c32e1d9ea61 --- /dev/null +++ b/sdk/outposts/src/protocol_serde/shape_subscription_list.rs @@ -0,0 +1,32 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. +pub(crate) fn de_subscription_list<'a, I>( + tokens: &mut ::std::iter::Peekable, +) -> ::std::result::Result>, ::aws_smithy_json::deserialize::error::DeserializeError> +where + I: Iterator, ::aws_smithy_json::deserialize::error::DeserializeError>>, +{ + match tokens.next().transpose()? { + Some(::aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), + Some(::aws_smithy_json::deserialize::Token::StartArray { .. }) => { + let mut items = Vec::new(); + loop { + match tokens.peek() { + Some(Ok(::aws_smithy_json::deserialize::Token::EndArray { .. 
})) => { + tokens.next().transpose().unwrap(); + break; + } + _ => { + let value = crate::protocol_serde::shape_subscription::de_subscription(tokens)?; + if let Some(value) = value { + items.push(value); + } + } + } + } + Ok(Some(items)) + } + _ => Err(::aws_smithy_json::deserialize::error::DeserializeError::custom( + "expected start array or null", + )), + } +} diff --git a/sdk/outposts/src/types.rs b/sdk/outposts/src/types.rs index 1aa9d28c1f84..e457cbd1dace 100644 --- a/sdk/outposts/src/types.rs +++ b/sdk/outposts/src/types.rs @@ -85,6 +85,12 @@ pub use crate::types::_asset_instance::AssetInstance; pub use crate::types::_instance_type_item::InstanceTypeItem; +pub use crate::types::_subscription::Subscription; + +pub use crate::types::_subscription_status::SubscriptionStatus; + +pub use crate::types::_subscription_type::SubscriptionType; + pub use crate::types::_order::Order; pub use crate::types::_payment_term::PaymentTerm; @@ -197,6 +203,12 @@ mod _shipment_information; mod _site; +mod _subscription; + +mod _subscription_status; + +mod _subscription_type; + mod _supported_hardware_type; mod _supported_storage_enum; diff --git a/sdk/outposts/src/types/_subscription.rs b/sdk/outposts/src/types/_subscription.rs new file mode 100644 index 000000000000..a13fb5089454 --- /dev/null +++ b/sdk/outposts/src/types/_subscription.rs @@ -0,0 +1,292 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +///

Provides information about your Amazon Web Services Outposts subscriptions.

+#[non_exhaustive] +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)] +pub struct Subscription { + ///

The ID of the subscription that appears on the Amazon Web Services Billing Center console.

+ pub subscription_id: ::std::option::Option<::std::string::String>, + ///

The type of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    ORIGINAL - The first order on the Amazon Web Services Outposts.

  • + ///
  • + ///

    RENEWAL - Renewal requests, both month to month and longer term.

  • + ///
  • + ///

    CAPACITY_INCREASE - Capacity scaling orders.

  • + ///
+ pub subscription_type: ::std::option::Option<crate::types::SubscriptionType>, + ///

The status of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    INACTIVE - Subscription requests that are inactive.

  • + ///
  • + ///

    ACTIVE - Subscription requests that are in progress and have an end date in the future.

  • + ///
  • + ///

    CANCELLED - Subscription requests that are cancelled.

  • + ///
+ pub subscription_status: ::std::option::Option<crate::types::SubscriptionStatus>, + ///

The order ID for your subscription.

+ pub order_ids: ::std::option::Option<::std::vec::Vec<::std::string::String>>, + ///

The date your subscription starts.

+ pub begin_date: ::std::option::Option<::aws_smithy_types::DateTime>, + ///

The date your subscription ends.

+ pub end_date: ::std::option::Option<::aws_smithy_types::DateTime>, + ///

The amount you are billed each month in the subscription period.

+ pub monthly_recurring_price: ::std::option::Option<f64>, + ///

The amount billed when the subscription is created. This is a one-time charge.

+ pub upfront_price: ::std::option::Option<f64>, +} +impl Subscription { + ///

The ID of the subscription that appears on the Amazon Web Services Billing Center console.

+ pub fn subscription_id(&self) -> ::std::option::Option<&str> { + self.subscription_id.as_deref() + } + ///

The type of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    ORIGINAL - The first order on the Amazon Web Services Outposts.

  • + ///
  • + ///

    RENEWAL - Renewal requests, both month to month and longer term.

  • + ///
  • + ///

    CAPACITY_INCREASE - Capacity scaling orders.

  • + ///
+ pub fn subscription_type(&self) -> ::std::option::Option<&crate::types::SubscriptionType> { + self.subscription_type.as_ref() + } + ///

The status of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    INACTIVE - Subscription requests that are inactive.

  • + ///
  • + ///

    ACTIVE - Subscription requests that are in progress and have an end date in the future.

  • + ///
  • + ///

    CANCELLED - Subscription requests that are cancelled.

  • + ///
+ pub fn subscription_status(&self) -> ::std::option::Option<&crate::types::SubscriptionStatus> { + self.subscription_status.as_ref() + } + ///

The order ID for your subscription.

+ /// + /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.order_ids.is_none()`. + pub fn order_ids(&self) -> &[::std::string::String] { + self.order_ids.as_deref().unwrap_or_default() + } + ///

The date your subscription starts.

+ pub fn begin_date(&self) -> ::std::option::Option<&::aws_smithy_types::DateTime> { + self.begin_date.as_ref() + } + ///

The date your subscription ends.

+ pub fn end_date(&self) -> ::std::option::Option<&::aws_smithy_types::DateTime> { + self.end_date.as_ref() + } + ///

The amount you are billed each month in the subscription period.

+ pub fn monthly_recurring_price(&self) -> ::std::option::Option<f64> { + self.monthly_recurring_price + } + ///

The amount billed when the subscription is created. This is a one-time charge.

+ pub fn upfront_price(&self) -> ::std::option::Option<f64> { + self.upfront_price + } +} +impl Subscription { + /// Creates a new builder-style object to manufacture [`Subscription`](crate::types::Subscription). + pub fn builder() -> crate::types::builders::SubscriptionBuilder { + crate::types::builders::SubscriptionBuilder::default() + } +} + +/// A builder for [`Subscription`](crate::types::Subscription). +#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)] +#[non_exhaustive] +pub struct SubscriptionBuilder { + pub(crate) subscription_id: ::std::option::Option<::std::string::String>, + pub(crate) subscription_type: ::std::option::Option<crate::types::SubscriptionType>, + pub(crate) subscription_status: ::std::option::Option<crate::types::SubscriptionStatus>, + pub(crate) order_ids: ::std::option::Option<::std::vec::Vec<::std::string::String>>, + pub(crate) begin_date: ::std::option::Option<::aws_smithy_types::DateTime>, + pub(crate) end_date: ::std::option::Option<::aws_smithy_types::DateTime>, + pub(crate) monthly_recurring_price: ::std::option::Option<f64>, + pub(crate) upfront_price: ::std::option::Option<f64>, +} +impl SubscriptionBuilder { + ///

The ID of the subscription that appears on the Amazon Web Services Billing Center console.

+ pub fn subscription_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.subscription_id = ::std::option::Option::Some(input.into()); + self + } + ///

The ID of the subscription that appears on the Amazon Web Services Billing Center console.

+ pub fn set_subscription_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.subscription_id = input; + self + } + ///

The ID of the subscription that appears on the Amazon Web Services Billing Center console.

+ pub fn get_subscription_id(&self) -> &::std::option::Option<::std::string::String> { + &self.subscription_id + } + ///

The type of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    ORIGINAL - The first order on the Amazon Web Services Outposts.

  • + ///
  • + ///

    RENEWAL - Renewal requests, both month to month and longer term.

  • + ///
  • + ///

    CAPACITY_INCREASE - Capacity scaling orders.

  • + ///
+ pub fn subscription_type(mut self, input: crate::types::SubscriptionType) -> Self { + self.subscription_type = ::std::option::Option::Some(input); + self + } + ///

The type of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    ORIGINAL - The first order on the Amazon Web Services Outposts.

  • + ///
  • + ///

    RENEWAL - Renewal requests, both month to month and longer term.

  • + ///
  • + ///

    CAPACITY_INCREASE - Capacity scaling orders.

  • + ///
+ pub fn set_subscription_type(mut self, input: ::std::option::Option<crate::types::SubscriptionType>) -> Self { + self.subscription_type = input; + self + } + ///

The type of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    ORIGINAL - The first order on the Amazon Web Services Outposts.

  • + ///
  • + ///

    RENEWAL - Renewal requests, both month to month and longer term.

  • + ///
  • + ///

    CAPACITY_INCREASE - Capacity scaling orders.

  • + ///
+ pub fn get_subscription_type(&self) -> &::std::option::Option<crate::types::SubscriptionType> { + &self.subscription_type + } + ///

The status of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    INACTIVE - Subscription requests that are inactive.

  • + ///
  • + ///

    ACTIVE - Subscription requests that are in progress and have an end date in the future.

  • + ///
  • + ///

    CANCELLED - Subscription requests that are cancelled.

  • + ///
+ pub fn subscription_status(mut self, input: crate::types::SubscriptionStatus) -> Self { + self.subscription_status = ::std::option::Option::Some(input); + self + } + ///

The status of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    INACTIVE - Subscription requests that are inactive.

  • + ///
  • + ///

    ACTIVE - Subscription requests that are in progress and have an end date in the future.

  • + ///
  • + ///

    CANCELLED - Subscription requests that are cancelled.

  • + ///
+ pub fn set_subscription_status(mut self, input: ::std::option::Option<crate::types::SubscriptionStatus>) -> Self { + self.subscription_status = input; + self + } + ///

The status of subscription which can be one of the following:

+ ///
    + ///
  • + ///

    INACTIVE - Subscription requests that are inactive.

  • + ///
  • + ///

    ACTIVE - Subscription requests that are in progress and have an end date in the future.

  • + ///
  • + ///

    CANCELLED - Subscription requests that are cancelled.

  • + ///
+ pub fn get_subscription_status(&self) -> &::std::option::Option<crate::types::SubscriptionStatus> { + &self.subscription_status + } + /// Appends an item to `order_ids`. + /// + /// To override the contents of this collection use [`set_order_ids`](Self::set_order_ids). + /// + ///

The order ID for your subscription.

+ pub fn order_ids(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + let mut v = self.order_ids.unwrap_or_default(); + v.push(input.into()); + self.order_ids = ::std::option::Option::Some(v); + self + } + ///

The order ID for your subscription.

+ pub fn set_order_ids(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self { + self.order_ids = input; + self + } + ///

The order ID for your subscription.

+ pub fn get_order_ids(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> { + &self.order_ids + } + ///

The date your subscription starts.

+ pub fn begin_date(mut self, input: ::aws_smithy_types::DateTime) -> Self { + self.begin_date = ::std::option::Option::Some(input); + self + } + ///

The date your subscription starts.

+ pub fn set_begin_date(mut self, input: ::std::option::Option<::aws_smithy_types::DateTime>) -> Self { + self.begin_date = input; + self + } + ///

The date your subscription starts.

+ pub fn get_begin_date(&self) -> &::std::option::Option<::aws_smithy_types::DateTime> { + &self.begin_date + } + ///

The date your subscription ends.

+ pub fn end_date(mut self, input: ::aws_smithy_types::DateTime) -> Self { + self.end_date = ::std::option::Option::Some(input); + self + } + ///

The date your subscription ends.

+ pub fn set_end_date(mut self, input: ::std::option::Option<::aws_smithy_types::DateTime>) -> Self { + self.end_date = input; + self + } + ///

The date your subscription ends.

+ pub fn get_end_date(&self) -> &::std::option::Option<::aws_smithy_types::DateTime> { + &self.end_date + } + ///

The amount you are billed each month in the subscription period.

+ pub fn monthly_recurring_price(mut self, input: f64) -> Self { + self.monthly_recurring_price = ::std::option::Option::Some(input); + self + } + ///

The amount you are billed each month in the subscription period.

+ pub fn set_monthly_recurring_price(mut self, input: ::std::option::Option<f64>) -> Self { + self.monthly_recurring_price = input; + self + } + ///

The amount you are billed each month in the subscription period.

+ pub fn get_monthly_recurring_price(&self) -> &::std::option::Option<f64> { + &self.monthly_recurring_price + } + ///

The amount billed when the subscription is created. This is a one-time charge.

+ pub fn upfront_price(mut self, input: f64) -> Self { + self.upfront_price = ::std::option::Option::Some(input); + self + } + ///

The amount billed when the subscription is created. This is a one-time charge.

+ pub fn set_upfront_price(mut self, input: ::std::option::Option<f64>) -> Self { + self.upfront_price = input; + self + } + ///

The amount billed when the subscription is created. This is a one-time charge.

+ pub fn get_upfront_price(&self) -> &::std::option::Option { + &self.upfront_price + } + /// Consumes the builder and constructs a [`Subscription`](crate::types::Subscription). + pub fn build(self) -> crate::types::Subscription { + crate::types::Subscription { + subscription_id: self.subscription_id, + subscription_type: self.subscription_type, + subscription_status: self.subscription_status, + order_ids: self.order_ids, + begin_date: self.begin_date, + end_date: self.end_date, + monthly_recurring_price: self.monthly_recurring_price, + upfront_price: self.upfront_price, + } + } +} diff --git a/sdk/outposts/src/types/_subscription_status.rs b/sdk/outposts/src/types/_subscription_status.rs new file mode 100644 index 000000000000..83dd232e2869 --- /dev/null +++ b/sdk/outposts/src/types/_subscription_status.rs @@ -0,0 +1,114 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +/// When writing a match expression against `SubscriptionStatus`, it is important to ensure +/// your code is forward-compatible. That is, if a match arm handles a case for a +/// feature that is supported by the service but has not been represented as an enum +/// variant in a current version of SDK, your code should continue to work when you +/// upgrade SDK to a future version in which the enum does include a variant for that +/// feature. +/// +/// Here is an example of how you can make a match expression forward-compatible: +/// +/// ```text +/// # let subscriptionstatus = unimplemented!(); +/// match subscriptionstatus { +/// SubscriptionStatus::Active => { /* ... */ }, +/// SubscriptionStatus::Cancelled => { /* ... */ }, +/// SubscriptionStatus::Inactive => { /* ... */ }, +/// other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ }, +/// _ => { /* ... */ }, +/// } +/// ``` +/// The above code demonstrates that when `subscriptionstatus` represents +/// `NewFeature`, the execution path will lead to the second last match arm, +/// even though the enum does not contain a variant `SubscriptionStatus::NewFeature` +/// in the current version of SDK. The reason is that the variable `other`, +/// created by the `@` operator, is bound to +/// `SubscriptionStatus::Unknown(UnknownVariantValue("NewFeature".to_owned()))` +/// and calling `as_str` on it yields `"NewFeature"`. +/// This match expression is forward-compatible when executed with a newer +/// version of SDK where the variant `SubscriptionStatus::NewFeature` is defined. +/// Specifically, when `subscriptionstatus` represents `NewFeature`, +/// the execution path will hit the second last match arm as before by virtue of +/// calling `as_str` on `SubscriptionStatus::NewFeature` also yielding `"NewFeature"`. +/// +/// Explicitly matching on the `Unknown` variant should +/// be avoided for two reasons: +/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted. +/// - It might inadvertently shadow other intended match arms. 
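As a rough illustration (not part of the generated code), the new `Subscription` type and `SubscriptionStatus` enum can be consumed as sketched below. This assumes `aws-sdk-outposts` 1.85.0 as a dependency; the identifiers are placeholders, and the value is assembled with the builder shown above only because the operation that returns it is not part of this excerpt.

```rust
use aws_sdk_outposts::types::{Subscription, SubscriptionStatus, SubscriptionType};

fn main() {
    // Placeholder data; in practice these values come back from the service.
    let subscription = Subscription::builder()
        .subscription_id("sub-0123456789abcdef0")
        .subscription_type(SubscriptionType::Original)
        .subscription_status(SubscriptionStatus::Active)
        .order_ids("ow-0123456789abcdef0")
        .monthly_recurring_price(1234.56)
        .upfront_price(0.0)
        .build();

    // Forward-compatible match, per the guidance above: statuses added by the
    // service after this SDK release surface through `as_str()` instead of
    // breaking this code.
    match subscription.subscription_status() {
        Some(SubscriptionStatus::Active) => println!("subscription is active"),
        Some(SubscriptionStatus::Inactive) => println!("subscription request is inactive"),
        Some(SubscriptionStatus::Cancelled) => println!("subscription was cancelled"),
        Some(other) if other.as_str() == "NewFeature" => println!("handle the new status here"),
        Some(other) => println!("unrecognized status: {}", other.as_str()),
        None => println!("no status reported"),
    }
}
```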
+/// +#[allow(missing_docs)] // documentation missing in model +#[non_exhaustive] +#[derive( + ::std::clone::Clone, ::std::cmp::Eq, ::std::cmp::Ord, ::std::cmp::PartialEq, ::std::cmp::PartialOrd, ::std::fmt::Debug, ::std::hash::Hash, +)] +pub enum SubscriptionStatus { + #[allow(missing_docs)] // documentation missing in model + Active, + #[allow(missing_docs)] // documentation missing in model + Cancelled, + #[allow(missing_docs)] // documentation missing in model + Inactive, + /// `Unknown` contains new variants that have been added since this code was generated. + #[deprecated(note = "Don't directly match on `Unknown`. See the docs on this enum for the correct way to handle unknown variants.")] + Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue), +} +impl ::std::convert::From<&str> for SubscriptionStatus { + fn from(s: &str) -> Self { + match s { + "ACTIVE" => SubscriptionStatus::Active, + "CANCELLED" => SubscriptionStatus::Cancelled, + "INACTIVE" => SubscriptionStatus::Inactive, + other => SubscriptionStatus::Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue(other.to_owned())), + } + } +} +impl ::std::str::FromStr for SubscriptionStatus { + type Err = ::std::convert::Infallible; + + fn from_str(s: &str) -> ::std::result::Result::Err> { + ::std::result::Result::Ok(SubscriptionStatus::from(s)) + } +} +impl SubscriptionStatus { + /// Returns the `&str` value of the enum member. + pub fn as_str(&self) -> &str { + match self { + SubscriptionStatus::Active => "ACTIVE", + SubscriptionStatus::Cancelled => "CANCELLED", + SubscriptionStatus::Inactive => "INACTIVE", + SubscriptionStatus::Unknown(value) => value.as_str(), + } + } + /// Returns all the `&str` representations of the enum members. + pub const fn values() -> &'static [&'static str] { + &["ACTIVE", "CANCELLED", "INACTIVE"] + } +} +impl ::std::convert::AsRef for SubscriptionStatus { + fn as_ref(&self) -> &str { + self.as_str() + } +} +impl SubscriptionStatus { + /// Parses the enum value while disallowing unknown variants. + /// + /// Unknown variants will result in an error. + pub fn try_parse(value: &str) -> ::std::result::Result { + match Self::from(value) { + #[allow(deprecated)] + Self::Unknown(_) => ::std::result::Result::Err(crate::error::UnknownVariantError::new(value)), + known => Ok(known), + } + } +} +impl ::std::fmt::Display for SubscriptionStatus { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match self { + SubscriptionStatus::Active => write!(f, "ACTIVE"), + SubscriptionStatus::Cancelled => write!(f, "CANCELLED"), + SubscriptionStatus::Inactive => write!(f, "INACTIVE"), + SubscriptionStatus::Unknown(value) => write!(f, "{}", value), + } + } +} diff --git a/sdk/outposts/src/types/_subscription_type.rs b/sdk/outposts/src/types/_subscription_type.rs new file mode 100644 index 000000000000..ed70842ff35a --- /dev/null +++ b/sdk/outposts/src/types/_subscription_type.rs @@ -0,0 +1,114 @@ +// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. + +/// When writing a match expression against `SubscriptionType`, it is important to ensure +/// your code is forward-compatible. That is, if a match arm handles a case for a +/// feature that is supported by the service but has not been represented as an enum +/// variant in a current version of SDK, your code should continue to work when you +/// upgrade SDK to a future version in which the enum does include a variant for that +/// feature. 
+/// +/// Here is an example of how you can make a match expression forward-compatible: +/// +/// ```text +/// # let subscriptiontype = unimplemented!(); +/// match subscriptiontype { +/// SubscriptionType::CapacityIncrease => { /* ... */ }, +/// SubscriptionType::Original => { /* ... */ }, +/// SubscriptionType::Renewal => { /* ... */ }, +/// other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ }, +/// _ => { /* ... */ }, +/// } +/// ``` +/// The above code demonstrates that when `subscriptiontype` represents +/// `NewFeature`, the execution path will lead to the second last match arm, +/// even though the enum does not contain a variant `SubscriptionType::NewFeature` +/// in the current version of SDK. The reason is that the variable `other`, +/// created by the `@` operator, is bound to +/// `SubscriptionType::Unknown(UnknownVariantValue("NewFeature".to_owned()))` +/// and calling `as_str` on it yields `"NewFeature"`. +/// This match expression is forward-compatible when executed with a newer +/// version of SDK where the variant `SubscriptionType::NewFeature` is defined. +/// Specifically, when `subscriptiontype` represents `NewFeature`, +/// the execution path will hit the second last match arm as before by virtue of +/// calling `as_str` on `SubscriptionType::NewFeature` also yielding `"NewFeature"`. +/// +/// Explicitly matching on the `Unknown` variant should +/// be avoided for two reasons: +/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted. +/// - It might inadvertently shadow other intended match arms. +/// +#[allow(missing_docs)] // documentation missing in model +#[non_exhaustive] +#[derive( + ::std::clone::Clone, ::std::cmp::Eq, ::std::cmp::Ord, ::std::cmp::PartialEq, ::std::cmp::PartialOrd, ::std::fmt::Debug, ::std::hash::Hash, +)] +pub enum SubscriptionType { + #[allow(missing_docs)] // documentation missing in model + CapacityIncrease, + #[allow(missing_docs)] // documentation missing in model + Original, + #[allow(missing_docs)] // documentation missing in model + Renewal, + /// `Unknown` contains new variants that have been added since this code was generated. + #[deprecated(note = "Don't directly match on `Unknown`. See the docs on this enum for the correct way to handle unknown variants.")] + Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue), +} +impl ::std::convert::From<&str> for SubscriptionType { + fn from(s: &str) -> Self { + match s { + "CAPACITY_INCREASE" => SubscriptionType::CapacityIncrease, + "ORIGINAL" => SubscriptionType::Original, + "RENEWAL" => SubscriptionType::Renewal, + other => SubscriptionType::Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue(other.to_owned())), + } + } +} +impl ::std::str::FromStr for SubscriptionType { + type Err = ::std::convert::Infallible; + + fn from_str(s: &str) -> ::std::result::Result::Err> { + ::std::result::Result::Ok(SubscriptionType::from(s)) + } +} +impl SubscriptionType { + /// Returns the `&str` value of the enum member. + pub fn as_str(&self) -> &str { + match self { + SubscriptionType::CapacityIncrease => "CAPACITY_INCREASE", + SubscriptionType::Original => "ORIGINAL", + SubscriptionType::Renewal => "RENEWAL", + SubscriptionType::Unknown(value) => value.as_str(), + } + } + /// Returns all the `&str` representations of the enum members. 
+ pub const fn values() -> &'static [&'static str] { + &["CAPACITY_INCREASE", "ORIGINAL", "RENEWAL"] + } +} +impl ::std::convert::AsRef for SubscriptionType { + fn as_ref(&self) -> &str { + self.as_str() + } +} +impl SubscriptionType { + /// Parses the enum value while disallowing unknown variants. + /// + /// Unknown variants will result in an error. + pub fn try_parse(value: &str) -> ::std::result::Result { + match Self::from(value) { + #[allow(deprecated)] + Self::Unknown(_) => ::std::result::Result::Err(crate::error::UnknownVariantError::new(value)), + known => Ok(known), + } + } +} +impl ::std::fmt::Display for SubscriptionType { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match self { + SubscriptionType::CapacityIncrease => write!(f, "CAPACITY_INCREASE"), + SubscriptionType::Original => write!(f, "ORIGINAL"), + SubscriptionType::Renewal => write!(f, "RENEWAL"), + SubscriptionType::Unknown(value) => write!(f, "{}", value), + } + } +} diff --git a/sdk/outposts/src/types/builders.rs b/sdk/outposts/src/types/builders.rs index 8910381df71d..c75b5efa8a49 100644 --- a/sdk/outposts/src/types/builders.rs +++ b/sdk/outposts/src/types/builders.rs @@ -35,6 +35,8 @@ pub use crate::types::_asset_instance::AssetInstanceBuilder; pub use crate::types::_instance_type_item::InstanceTypeItemBuilder; +pub use crate::types::_subscription::SubscriptionBuilder; + pub use crate::types::_order::OrderBuilder; pub use crate::types::_line_item::LineItemBuilder; diff --git a/sdk/sesv2/Cargo.toml b/sdk/sesv2/Cargo.toml index ed8cc5888f34..d9ea5a64e4d6 100644 --- a/sdk/sesv2/Cargo.toml +++ b/sdk/sesv2/Cargo.toml @@ -1,7 +1,7 @@ # Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. [package] name = "aws-sdk-sesv2" -version = "1.87.0" +version = "1.88.0" authors = ["AWS Rust SDK Team ", "Russell Cohen "] description = "AWS SDK for Amazon Simple Email Service" edition = "2021" diff --git a/sdk/sesv2/README.md b/sdk/sesv2/README.md index 77a886dc6f00..8764d9f6bcf4 100644 --- a/sdk/sesv2/README.md +++ b/sdk/sesv2/README.md @@ -16,7 +16,7 @@ your project, add the following to your **Cargo.toml** file: ```toml [dependencies] aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-sesv2 = "1.87.0" +aws-sdk-sesv2 = "1.88.0" tokio = { version = "1", features = ["full"] } ``` diff --git a/sdk/sesv2/src/lib.rs b/sdk/sesv2/src/lib.rs index 5f172ea5a0f1..c2ef1074f538 100644 --- a/sdk/sesv2/src/lib.rs +++ b/sdk/sesv2/src/lib.rs @@ -34,7 +34,7 @@ //! ```toml //! [dependencies] //! aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -//! aws-sdk-sesv2 = "1.87.0" +//! aws-sdk-sesv2 = "1.88.0" //! tokio = { version = "1", features = ["full"] } //! ``` //! diff --git a/sdk/sesv2/src/types/_dedicated_ip.rs b/sdk/sesv2/src/types/_dedicated_ip.rs index b64b318248c8..a1fddd3d639a 100644 --- a/sdk/sesv2/src/types/_dedicated_ip.rs +++ b/sdk/sesv2/src/types/_dedicated_ip.rs @@ -13,9 +13,17 @@ pub struct DedicatedIp { ///

IN_PROGRESS – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.

///
  • ///

    DONE – The dedicated IP warm-up process is complete, and the IP address is ready to use.

  • + ///
  • + ///

    NOT_APPLICABLE – The warm-up status doesn't apply to this IP address. This status is used for IP addresses in managed dedicated IP pools, where Amazon SES automatically handles the warm-up process.

  • /// pub warmup_status: crate::types::WarmupStatus, - ///

    Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.

    + ///

    Indicates the progress of your dedicated IP warm-up:

    + ///
      + ///
    • + ///

      0-100 – For standard dedicated IP addresses, this shows the warm-up completion percentage. A value of 100 means the IP address is fully warmed up and ready for use.

    • + ///
    • + ///

      -1 – Appears for IP addresses in managed dedicated pools where Amazon SES automatically handles the warm-up process, making the percentage not applicable.

    • + ///
    pub warmup_percentage: i32, ///

    The name of the dedicated IP pool that the IP address is associated with.

    pub pool_name: ::std::option::Option<::std::string::String>, @@ -32,11 +40,19 @@ impl DedicatedIp { ///

    IN_PROGRESS – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.

    ///
  • ///

    DONE – The dedicated IP warm-up process is complete, and the IP address is ready to use.

  • + ///
  • + ///

    NOT_APPLICABLE – The warm-up status doesn't apply to this IP address. This status is used for IP addresses in managed dedicated IP pools, where Amazon SES automatically handles the warm-up process.

  • /// pub fn warmup_status(&self) -> &crate::types::WarmupStatus { &self.warmup_status } - ///

    Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.

    + ///

    Indicates the progress of your dedicated IP warm-up:

    + ///
      + ///
    • + ///

      0-100 – For standard dedicated IP addresses, this shows the warm-up completion percentage. A value of 100 means the IP address is fully warmed up and ready for use.

    • + ///
    • + ///

      -1 – Appears for IP addresses in managed dedicated pools where Amazon SES automatically handles the warm-up process, making the percentage not applicable.

    • + ///
    pub fn warmup_percentage(&self) -> i32 { self.warmup_percentage } @@ -83,6 +99,8 @@ impl DedicatedIpBuilder { ///

    IN_PROGRESS – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.

    ///
  • ///

    DONE – The dedicated IP warm-up process is complete, and the IP address is ready to use.

  • + ///
  • + ///

    NOT_APPLICABLE – The warm-up status doesn't apply to this IP address. This status is used for IP addresses in managed dedicated IP pools, where Amazon SES automatically handles the warm-up process.

  • /// /// This field is required. pub fn warmup_status(mut self, input: crate::types::WarmupStatus) -> Self { @@ -95,6 +113,8 @@ impl DedicatedIpBuilder { ///

    IN_PROGRESS – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.

    ///
  • ///

    DONE – The dedicated IP warm-up process is complete, and the IP address is ready to use.

  • + ///
  • + ///

    NOT_APPLICABLE – The warm-up status doesn't apply to this IP address. This status is used for IP addresses in managed dedicated IP pools, where Amazon SES automatically handles the warm-up process.

  • /// pub fn set_warmup_status(mut self, input: ::std::option::Option<crate::types::WarmupStatus>) -> Self { self.warmup_status = input; @@ -106,22 +126,42 @@ impl DedicatedIpBuilder { ///

    IN_PROGRESS – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.

    ///
  • ///

    DONE – The dedicated IP warm-up process is complete, and the IP address is ready to use.

  • + ///
  • + ///

    NOT_APPLICABLE – The warm-up status doesn't apply to this IP address. This status is used for IP addresses in managed dedicated IP pools, where Amazon SES automatically handles the warm-up process.

  • /// pub fn get_warmup_status(&self) -> &::std::option::Option<crate::types::WarmupStatus> { &self.warmup_status } - ///

    Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.

    + ///

    Indicates the progress of your dedicated IP warm-up:

    + ///
      + ///
    • + ///

      0-100 – For standard dedicated IP addresses, this shows the warm-up completion percentage. A value of 100 means the IP address is fully warmed up and ready for use.

    • + ///
    • + ///

      -1 – Appears for IP addresses in managed dedicated pools where Amazon SES automatically handles the warm-up process, making the percentage not applicable.

    • + ///
    /// This field is required. pub fn warmup_percentage(mut self, input: i32) -> Self { self.warmup_percentage = ::std::option::Option::Some(input); self } - ///

    Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.

    + ///

    Indicates the progress of your dedicated IP warm-up:

    + ///
      + ///
    • + ///

      0-100 – For standard dedicated IP addresses, this shows the warm-up completion percentage. A value of 100 means the IP address is fully warmed up and ready for use.

    • + ///
    • + ///

      -1 – Appears for IP addresses in managed dedicated pools where Amazon SES automatically handles the warm-up process, making the percentage not applicable.

    • + ///
    pub fn set_warmup_percentage(mut self, input: ::std::option::Option<i32>) -> Self { self.warmup_percentage = input; self } - ///

    Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.

    + ///

    Indicates the progress of your dedicated IP warm-up:

    + ///
      + ///
    • + ///

      0-100 – For standard dedicated IP addresses, this shows the warm-up completion percentage. A value of 100 means the IP address is fully warmed up and ready for use.

    • + ///
    • + ///

      -1 – Appears for IP addresses in managed dedicated pools where Amazon SES automatically handles the warm-up process, making the percentage not applicable.

    • + ///
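As a rough illustration of the new warm-up semantics (the `NOT_APPLICABLE` status and the `-1` percentage for managed pools), the sketch below lists the dedicated IPs in a pool. It assumes `aws-sdk-sesv2` 1.88.0 with `aws-config` and `tokio` set up as in the crate README; the pool name is a placeholder, and the output accessors are assumed to follow the SDK's usual generated conventions.

```rust
use aws_sdk_sesv2::types::WarmupStatus;

#[tokio::main]
async fn main() -> Result<(), aws_sdk_sesv2::Error> {
    // Credentials and region are taken from the environment, as in the README.
    let config = aws_config::load_from_env().await;
    let client = aws_sdk_sesv2::Client::new(&config);

    // "my-managed-pool" is a placeholder dedicated IP pool name.
    let output = client.get_dedicated_ips().pool_name("my-managed-pool").send().await?;

    for ip in output.dedicated_ips() {
        match ip.warmup_status() {
            // Managed pools: SES handles warm-up and reports the percentage as -1.
            WarmupStatus::NotApplicable => println!("{}: warm-up managed by SES", ip.ip()),
            WarmupStatus::Done => println!("{}: warmed up", ip.ip()),
            WarmupStatus::InProgress => {
                println!("{}: warm-up {}% complete", ip.ip(), ip.warmup_percentage())
            }
            other => println!("{}: unrecognized warm-up status {}", ip.ip(), other.as_str()),
        }
    }
    Ok(())
}
```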
    pub fn get_warmup_percentage(&self) -> &::std::option::Option { &self.warmup_percentage } diff --git a/sdk/sesv2/src/types/_warmup_status.rs b/sdk/sesv2/src/types/_warmup_status.rs index 14b8e39832c5..d4d3d3a2c00a 100644 --- a/sdk/sesv2/src/types/_warmup_status.rs +++ b/sdk/sesv2/src/types/_warmup_status.rs @@ -14,6 +14,7 @@ /// match warmupstatus { /// WarmupStatus::Done => { /* ... */ }, /// WarmupStatus::InProgress => { /* ... */ }, +/// WarmupStatus::NotApplicable => { /* ... */ }, /// other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ }, /// _ => { /* ... */ }, /// } @@ -46,6 +47,8 @@ pub enum WarmupStatus { Done, #[allow(missing_docs)] // documentation missing in model InProgress, + #[allow(missing_docs)] // documentation missing in model + NotApplicable, /// `Unknown` contains new variants that have been added since this code was generated. #[deprecated(note = "Don't directly match on `Unknown`. See the docs on this enum for the correct way to handle unknown variants.")] Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue), @@ -55,6 +58,7 @@ impl ::std::convert::From<&str> for WarmupStatus { match s { "DONE" => WarmupStatus::Done, "IN_PROGRESS" => WarmupStatus::InProgress, + "NOT_APPLICABLE" => WarmupStatus::NotApplicable, other => WarmupStatus::Unknown(crate::primitives::sealed_enum_unknown::UnknownVariantValue(other.to_owned())), } } @@ -72,12 +76,13 @@ impl WarmupStatus { match self { WarmupStatus::Done => "DONE", WarmupStatus::InProgress => "IN_PROGRESS", + WarmupStatus::NotApplicable => "NOT_APPLICABLE", WarmupStatus::Unknown(value) => value.as_str(), } } /// Returns all the `&str` representations of the enum members. pub const fn values() -> &'static [&'static str] { - &["DONE", "IN_PROGRESS"] + &["DONE", "IN_PROGRESS", "NOT_APPLICABLE"] } } impl ::std::convert::AsRef for WarmupStatus { @@ -102,6 +107,7 @@ impl ::std::fmt::Display for WarmupStatus { match self { WarmupStatus::Done => write!(f, "DONE"), WarmupStatus::InProgress => write!(f, "IN_PROGRESS"), + WarmupStatus::NotApplicable => write!(f, "NOT_APPLICABLE"), WarmupStatus::Unknown(value) => write!(f, "{}", value), } } diff --git a/sdk/ssm/Cargo.toml b/sdk/ssm/Cargo.toml index 5f6010834a3d..efa2e0b1314a 100644 --- a/sdk/ssm/Cargo.toml +++ b/sdk/ssm/Cargo.toml @@ -1,7 +1,7 @@ # Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. [package] name = "aws-sdk-ssm" -version = "1.85.0" +version = "1.85.1" authors = ["AWS Rust SDK Team ", "Russell Cohen "] description = "AWS SDK for Amazon Simple Systems Manager (SSM)" edition = "2021" diff --git a/sdk/ssm/README.md b/sdk/ssm/README.md index e602c440d8e8..26ec65745598 100644 --- a/sdk/ssm/README.md +++ b/sdk/ssm/README.md @@ -6,7 +6,7 @@ This reference is intended to be used with the [Amazon Web Services Systems Mana __Related resources__ - For information about each of the tools that comprise Systems Manager, see [Using Systems Manager tools](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-tools.html) in the _Amazon Web Services Systems Manager User Guide_. - - For details about predefined runbooks for Automation, a tool in Amazon Web Services Systems Manager, see the _ [Systems Manager Automation runbook reference](https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-runbook-reference.html) _. 
+ - For details about predefined runbooks for Automation, a tool in Amazon Web Services Systems Manager, see the _ [Systems Manager Automation Runbook Reference](https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-runbook-reference.html) _. - For information about AppConfig, a tool in Systems Manager, see the _ [AppConfig User Guide](https://docs.aws.amazon.com/appconfig/latest/userguide/) _ and the _ [AppConfig API Reference](https://docs.aws.amazon.com/appconfig/2019-10-09/APIReference/) _. - For information about Incident Manager, a tool in Systems Manager, see the _ [Systems Manager Incident Manager User Guide](https://docs.aws.amazon.com/incident-manager/latest/userguide/) _ and the _ [Systems Manager Incident Manager API Reference](https://docs.aws.amazon.com/incident-manager/latest/APIReference/) _. @@ -22,7 +22,7 @@ your project, add the following to your **Cargo.toml** file: ```toml [dependencies] aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-ssm = "1.85.0" +aws-sdk-ssm = "1.85.1" tokio = { version = "1", features = ["full"] } ``` diff --git a/sdk/ssm/src/client/put_parameter.rs b/sdk/ssm/src/client/put_parameter.rs index 01f229c9656f..ac47f611119f 100644 --- a/sdk/ssm/src/client/put_parameter.rs +++ b/sdk/ssm/src/client/put_parameter.rs @@ -3,7 +3,7 @@ impl super::Client { /// Constructs a fluent builder for the [`PutParameter`](crate::operation::put_parameter::builders::PutParameterFluentBuilder) operation. /// /// - The fluent builder is configurable: - /// - [`name(impl Into)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::name) / [`set_name(Option)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::set_name):
    required: **true**

    The fully qualified name of the parameter that you want to create or update.

    You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

    The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

    Naming Constraints:

    • Parameter names are case sensitive.

    • A parameter name must be unique within an Amazon Web Services Region

    • A parameter name can't be prefixed with "aws" or "ssm" (case-insensitive).

    • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

      In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

    • A parameter name can't include spaces.

    • Parameter hierarchies are limited to a maximum depth of fifteen levels.

    For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

    The reported maximum length of 2048 characters for a parameter name includes 1037 characters that are reserved for internal use by Systems Manager. The maximum length for a parameter name that you specify is 1011 characters.

    This count of 1011 characters includes the characters in the ARN that precede the name you specify. This ARN length will vary depending on your partition and Region. For example, the following 45 characters count toward the 1011 character maximum for a parameter created in the US East (Ohio) Region: arn:aws:ssm:us-east-2:111122223333:parameter/.


    + /// - [`name(impl Into)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::name) / [`set_name(Option)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::set_name):
    required: **true**

    The fully qualified name of the parameter that you want to create or update.

    You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

    The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

    Naming Constraints:

    • Parameter names are case sensitive.

    • A parameter name must be unique within an Amazon Web Services Region

    • A parameter name can't be prefixed with "aws" or "ssm" (case-insensitive).

    • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

      In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

    • Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.

    • Parameter hierarchies are limited to a maximum depth of fifteen levels.

    For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

    The reported maximum length of 2048 characters for a parameter name includes 1037 characters that are reserved for internal use by Systems Manager. The maximum length for a parameter name that you specify is 1011 characters.

    This count of 1011 characters includes the characters in the ARN that precede the name you specify. This ARN length will vary depending on your partition and Region. For example, the following 45 characters count toward the 1011 character maximum for a parameter created in the US East (Ohio) Region: arn:aws:ssm:us-east-2:111122223333:parameter/.


    /// - [`description(impl Into)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::description) / [`set_description(Option)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::set_description):
    required: **false**

    Information about the parameter that you want to add to the system. Optional but recommended.

    Don't enter personally identifiable information in this field.


    /// - [`value(impl Into)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::value) / [`set_value(Option)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::set_value):
    required: **true**

    The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB.

    Parameters can't be referenced or nested in the values of other parameters. You can't include values wrapped in double brackets {{}} or {{ssm:parameter-name}} in a parameter value.


    /// - [`r#type(ParameterType)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::type) / [`set_type(Option)`](crate::operation::put_parameter::builders::PutParameterFluentBuilder::set_type):
    required: **false**

    The type of parameter that you want to create.

    SecureString isn't currently supported for CloudFormation templates.

    Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

    Specifying a parameter type isn't required when updating a parameter. You must specify a parameter type when creating a parameter.
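As a rough illustration of the constraints above, the sketch below creates a parameter with a hierarchical, space-free name and an explicit type. It assumes `aws-sdk-ssm` 1.85.1 with `aws-config` and `tokio` set up as in the crate README; the description and value are placeholders, and the output accessor is assumed to follow the SDK's usual generated conventions.

```rust
use aws_sdk_ssm::types::ParameterType;

#[tokio::main]
async fn main() -> Result<(), aws_sdk_ssm::Error> {
    let config = aws_config::load_from_env().await;
    let client = aws_sdk_ssm::Client::new(&config);

    // Leading slash, hierarchy delimited by '/', no spaces anywhere in the name.
    let output = client
        .put_parameter()
        .name("/Dev/DBServer/MySQL/db-string13")
        .description("Connection string for the Dev MySQL database") // placeholder
        .value("mysql://db.example.internal:3306") // placeholder
        .r#type(ParameterType::String)
        .overwrite(true)
        .send()
        .await?;

    println!("wrote parameter version {}", output.version());
    Ok(())
}
```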


    diff --git a/sdk/ssm/src/lib.rs b/sdk/ssm/src/lib.rs index 898480ae99c7..36da6854bfa2 100644 --- a/sdk/ssm/src/lib.rs +++ b/sdk/ssm/src/lib.rs @@ -24,7 +24,7 @@ //! //! __Related resources__ //! - For information about each of the tools that comprise Systems Manager, see [Using Systems Manager tools](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-tools.html) in the _Amazon Web Services Systems Manager User Guide_. -//! - For details about predefined runbooks for Automation, a tool in Amazon Web Services Systems Manager, see the _ [Systems Manager Automation runbook reference](https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-runbook-reference.html) _. +//! - For details about predefined runbooks for Automation, a tool in Amazon Web Services Systems Manager, see the _ [Systems Manager Automation Runbook Reference](https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-runbook-reference.html) _. //! - For information about AppConfig, a tool in Systems Manager, see the _ [AppConfig User Guide](https://docs.aws.amazon.com/appconfig/latest/userguide/) _ and the _ [AppConfig API Reference](https://docs.aws.amazon.com/appconfig/2019-10-09/APIReference/) _. //! - For information about Incident Manager, a tool in Systems Manager, see the _ [Systems Manager Incident Manager User Guide](https://docs.aws.amazon.com/incident-manager/latest/userguide/) _ and the _ [Systems Manager Incident Manager API Reference](https://docs.aws.amazon.com/incident-manager/latest/APIReference/) _. //! @@ -40,7 +40,7 @@ //! ```toml //! [dependencies] //! aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -//! aws-sdk-ssm = "1.85.0" +//! aws-sdk-ssm = "1.85.1" //! tokio = { version = "1", features = ["full"] } //! ``` //! diff --git a/sdk/ssm/src/operation/describe_parameters/builders.rs b/sdk/ssm/src/operation/describe_parameters/builders.rs index 557ceea248fc..9013df7e74db 100644 --- a/sdk/ssm/src/operation/describe_parameters/builders.rs +++ b/sdk/ssm/src/operation/describe_parameters/builders.rs @@ -23,7 +23,8 @@ impl crate::operation::describe_parameters::builders::DescribeParametersInputBui /// Fluent builder constructing a request to `DescribeParameters`. /// ///

    Lists the parameters in your Amazon Web Services account or the parameters shared with you when you enable the Shared option.

    -///

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    +///

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    +///

    Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.

    ///

    If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, DescribeParameters retrieves whatever the original key alias was referencing.

    ///
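As a rough illustration, the sketch below uses the generated paginator so the best-effort `MaxResults`/`NextToken` behavior described above is followed automatically; manual `next_token` round-trips are also possible. Setup assumptions are the same as in the crate README.

```rust
#[tokio::main]
async fn main() -> Result<(), aws_sdk_ssm::Error> {
    let config = aws_config::load_from_env().await;
    let client = aws_sdk_ssm::Client::new(&config);

    // Each page may hold anywhere from zero up to max_results items; the
    // paginator keeps requesting pages until NextToken is exhausted.
    let mut pages = client.describe_parameters().max_results(50).into_paginator().send();
    while let Some(page) = pages.next().await {
        let page = page?;
        for parameter in page.parameters() {
            println!("{}", parameter.name().unwrap_or_default());
        }
    }
    Ok(())
}
```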
    #[derive(::std::clone::Clone, ::std::fmt::Debug)] diff --git a/sdk/ssm/src/operation/get_parameter/builders.rs b/sdk/ssm/src/operation/get_parameter/builders.rs index d4747608c0b4..df53d8f9f708 100644 --- a/sdk/ssm/src/operation/get_parameter/builders.rs +++ b/sdk/ssm/src/operation/get_parameter/builders.rs @@ -22,7 +22,8 @@ impl crate::operation::get_parameter::builders::GetParameterInputBuilder { } /// Fluent builder constructing a request to `GetParameter`. /// -///

    Get information about a single parameter by specifying the parameter name.

    +///

    Get information about a single parameter by specifying the parameter name.

    +///

    Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.

    ///

    To get information about more than one parameter at a time, use the GetParameters operation.

    ///
    #[derive(::std::clone::Clone, ::std::fmt::Debug)] diff --git a/sdk/ssm/src/operation/get_parameter_history/builders.rs b/sdk/ssm/src/operation/get_parameter_history/builders.rs index 21466d4bd866..5088daf0c9b1 100644 --- a/sdk/ssm/src/operation/get_parameter_history/builders.rs +++ b/sdk/ssm/src/operation/get_parameter_history/builders.rs @@ -22,7 +22,8 @@ impl crate::operation::get_parameter_history::builders::GetParameterHistoryInput } /// Fluent builder constructing a request to `GetParameterHistory`. /// -///

    Retrieves the history of all changes to a parameter.

    +///

    Retrieves the history of all changes to a parameter.

    +///

    Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.

    ///

    If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, GetParameterHistory retrieves whatever the original key alias was referencing.

    ///
    #[derive(::std::clone::Clone, ::std::fmt::Debug)] diff --git a/sdk/ssm/src/operation/get_parameters/builders.rs b/sdk/ssm/src/operation/get_parameters/builders.rs index a990dbb120b4..17340dfd3873 100644 --- a/sdk/ssm/src/operation/get_parameters/builders.rs +++ b/sdk/ssm/src/operation/get_parameters/builders.rs @@ -25,6 +25,7 @@ impl crate::operation::get_parameters::builders::GetParametersInputBuilder { ///

    Get information about one or more parameters by specifying multiple parameter names.

    ///

    To get information about a single parameter, you can use the GetParameter operation instead.

    ///
    +///

    Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
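As a rough illustration, the sketch below fetches several parameters in one call; names the service cannot resolve are reported in `invalid_parameters` rather than failing the request. The second name is a placeholder, and the output accessors are assumed to follow the SDK's usual generated conventions.

```rust
use aws_sdk_ssm::Client;

async fn show_parameters(client: &Client) -> Result<(), aws_sdk_ssm::Error> {
    let output = client
        .get_parameters()
        .names("/Dev/DBServer/MySQL/db-string13")
        .names("/Dev/DBServer/MySQL/db-port") // placeholder second name
        .with_decryption(true)
        .send()
        .await?;

    for parameter in output.parameters() {
        println!(
            "{} = {}",
            parameter.name().unwrap_or_default(),
            parameter.value().unwrap_or_default()
        );
    }
    for name in output.invalid_parameters() {
        eprintln!("not found: {name}");
    }
    Ok(())
}
```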

    #[derive(::std::clone::Clone, ::std::fmt::Debug)] pub struct GetParametersFluentBuilder { handle: ::std::sync::Arc<crate::client::Handle>, diff --git a/sdk/ssm/src/operation/get_parameters_by_path/builders.rs index 3a281090856b..3b675762f6ab 100644 --- a/sdk/ssm/src/operation/get_parameters_by_path/builders.rs +++ b/sdk/ssm/src/operation/get_parameters_by_path/builders.rs @@ -24,6 +24,7 @@ impl crate::operation::get_parameters_by_path::builders::GetParametersByPathInpu /// ///

    Retrieve information about one or more parameters under a specified level in a hierarchy.

    ///

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    +///

    Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
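As a rough illustration, the sketch below walks one hierarchy level recursively with the generated paginator; the path is a placeholder and the accessors are assumed to follow the SDK's usual generated conventions.

```rust
use aws_sdk_ssm::Client;

// Collect the names of every parameter under `path`, following NextToken automatically.
async fn list_under_path(client: &Client, path: &str) -> Result<Vec<String>, aws_sdk_ssm::Error> {
    let mut names = Vec::new();
    let mut pages = client
        .get_parameters_by_path()
        .path(path) // e.g. "/Dev/DBServer" (placeholder hierarchy)
        .recursive(true)
        .into_paginator()
        .send();
    while let Some(page) = pages.next().await {
        let page = page?;
        for parameter in page.parameters() {
            if let Some(name) = parameter.name() {
                names.push(name.to_string());
            }
        }
    }
    Ok(names)
}
```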

    #[derive(::std::clone::Clone, ::std::fmt::Debug)] pub struct GetParametersByPathFluentBuilder { handle: ::std::sync::Arc<crate::client::Handle>, diff --git a/sdk/ssm/src/operation/label_parameter_version/builders.rs index bf9fff4a26a5..0bdeddc076f3 100644 --- a/sdk/ssm/src/operation/label_parameter_version/builders.rs +++ b/sdk/ssm/src/operation/label_parameter_version/builders.rs @@ -41,6 +41,8 @@ impl crate::operation::label_parameter_version::builders::LabelParameterVersionI ///

    Labels can contain letters (case sensitive), numbers, periods (.), hyphens (-), or underscores (_).

    ///
  • ///

    Labels can't begin with a number, "aws" or "ssm" (not case sensitive). If a label fails to meet these requirements, then the label isn't associated with a parameter and the system displays it in the list of InvalidLabels.

  • +///
  • +///

    Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.

  • /// #[derive(::std::clone::Clone, ::std::fmt::Debug)] pub struct LabelParameterVersionFluentBuilder { handle: ::std::sync::Arc<crate::client::Handle>, diff --git a/sdk/ssm/src/operation/put_compliance_items/builders.rs index 487eef3b9d45..e325f7c6a46f 100644 --- a/sdk/ssm/src/operation/put_compliance_items/builders.rs +++ b/sdk/ssm/src/operation/put_compliance_items/builders.rs @@ -30,7 +30,9 @@ impl crate::operation::put_compliance_items::builders::PutComplianceItemsInputBu ///
  • ///

    ExecutionType: Specify patch, association, or Custom:string.

  • ///
  • -///

    ExecutionTime. The time the patch, association, or custom compliance item was applied to the managed node.

  • +///

    ExecutionTime. The time the patch, association, or custom compliance item was applied to the managed node.

    +///

    For State Manager associations, this represents the time when compliance status was captured by the Systems Manager service during its internal compliance aggregation workflow, not necessarily when the association was executed on the managed node. State Manager updates compliance information for all associations on an instance whenever any association executes, which may result in multiple associations showing the same execution time.

    +///
    ///
  • ///

    Id: The patch, association, or custom compliance ID.

  • ///
diff --git a/sdk/ssm/src/operation/put_parameter/_put_parameter_input.rs b/sdk/ssm/src/operation/put_parameter/_put_parameter_input.rs
index 0cc0eb00a6e1..23d30a434ac8 100644
--- a/sdk/ssm/src/operation/put_parameter/_put_parameter_input.rs
+++ b/sdk/ssm/src/operation/put_parameter/_put_parameter_input.rs
@@ -19,7 +19,7 @@ pub struct PutParameterInput {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
@@ -131,7 +131,7 @@ impl PutParameterInput {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
@@ -307,7 +307,7 @@ impl PutParameterInputBuilder {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
@@ -336,7 +336,7 @@ impl PutParameterInputBuilder {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
@@ -364,7 +364,7 @@ impl PutParameterInputBuilder {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
diff --git a/sdk/ssm/src/operation/put_parameter/builders.rs b/sdk/ssm/src/operation/put_parameter/builders.rs
index 1975ee31ebc7..d8cc59bcf3b9 100644
--- a/sdk/ssm/src/operation/put_parameter/builders.rs
+++ b/sdk/ssm/src/operation/put_parameter/builders.rs
@@ -124,7 +124,7 @@ impl PutParameterFluentBuilder {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
@@ -152,7 +152,7 @@ impl PutParameterFluentBuilder {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
@@ -180,7 +180,7 @@ impl PutParameterFluentBuilder {
     /// Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-
     /// In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter
     ///
-    /// A parameter name can't include spaces.
+    /// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
     ///
     /// Parameter hierarchies are limited to a maximum depth of fifteen levels.
diff --git a/sdk/ssm/src/operation/unlabel_parameter_version/builders.rs b/sdk/ssm/src/operation/unlabel_parameter_version/builders.rs
index 55617198a2fe..263c414da23c 100644
--- a/sdk/ssm/src/operation/unlabel_parameter_version/builders.rs
+++ b/sdk/ssm/src/operation/unlabel_parameter_version/builders.rs
@@ -23,6 +23,7 @@ impl crate::operation::unlabel_parameter_version::builders::UnlabelParameterVers
 /// Fluent builder constructing a request to `UnlabelParameterVersion`.
 ///
 /// Remove a label or labels from a parameter.
+/// Parameter names can't contain spaces. The service removes any spaces specified for the beginning or end of a parameter name. If the specified name for a parameter contains spaces between characters, the request fails with a ValidationException error.
 #[derive(::std::clone::Clone, ::std::fmt::Debug)]
 pub struct UnlabelParameterVersionFluentBuilder {
     handle: ::std::sync::Arc<crate::client::Handle>,
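The hunks above only reword the parameter-name guidance, but the rule they describe is easy to see in client code. Below is a minimal sketch of calling `PutParameter` through the generated fluent builder; it is not part of this diff, it assumes the `aws-config`, `aws-sdk-ssm`, and `tokio` crates as dependencies, and the parameter name and value are illustrative only.

```rust
use aws_sdk_ssm::types::ParameterType;

#[tokio::main]
async fn main() -> Result<(), aws_sdk_ssm::Error> {
    let config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
    let client = aws_sdk_ssm::Client::new(&config);

    // Slashes delineate hierarchy levels; the name itself must not contain spaces.
    // A name with an embedded space (for example "/Dev/Project ABC/MyParameter")
    // would be rejected with a ValidationException, per the updated docs above.
    client
        .put_parameter()
        .name("/Dev/Production/East/Project-ABC/MyParameter")
        .value("example-value")
        .r#type(ParameterType::String)
        .overwrite(true)
        .send()
        .await?;

    Ok(())
}
```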

diff --git a/sdk/ssm/src/types/_compliance_execution_summary.rs b/sdk/ssm/src/types/_compliance_execution_summary.rs
index 620390f49e5e..f78528332c81 100644
--- a/sdk/ssm/src/types/_compliance_execution_summary.rs
+++ b/sdk/ssm/src/types/_compliance_execution_summary.rs
@@ -4,7 +4,9 @@
 #[non_exhaustive]
 #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
 pub struct ComplianceExecutionSummary {
-    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// For State Manager associations, this timestamp represents when the compliance status was captured and reported by the Systems Manager service, not when the underlying association was actually executed on the managed node. To track actual association execution times, use the DescribeAssociationExecutionTargets command or check the association execution history in the Systems Manager console.
+    ///
     pub execution_time: ::aws_smithy_types::DateTime,
     /// An ID created by the system when PutComplianceItems was called. For example, CommandID is a valid execution ID. You can use this ID in subsequent calls.
     pub execution_id: ::std::option::Option<::std::string::String>,
@@ -12,7 +14,9 @@ pub struct ComplianceExecutionSummary {
     pub execution_type: ::std::option::Option<::std::string::String>,
 }
 impl ComplianceExecutionSummary {
-    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// For State Manager associations, this timestamp represents when the compliance status was captured and reported by the Systems Manager service, not when the underlying association was actually executed on the managed node. To track actual association execution times, use the DescribeAssociationExecutionTargets command or check the association execution history in the Systems Manager console.
+    ///
     pub fn execution_time(&self) -> &::aws_smithy_types::DateTime {
         &self.execution_time
     }
@@ -41,18 +45,24 @@ pub struct ComplianceExecutionSummaryBuilder {
     pub(crate) execution_type: ::std::option::Option<::std::string::String>,
 }
 impl ComplianceExecutionSummaryBuilder {
-    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// For State Manager associations, this timestamp represents when the compliance status was captured and reported by the Systems Manager service, not when the underlying association was actually executed on the managed node. To track actual association execution times, use the DescribeAssociationExecutionTargets command or check the association execution history in the Systems Manager console.
+    ///
     /// This field is required.
     pub fn execution_time(mut self, input: ::aws_smithy_types::DateTime) -> Self {
         self.execution_time = ::std::option::Option::Some(input);
         self
     }
-    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// For State Manager associations, this timestamp represents when the compliance status was captured and reported by the Systems Manager service, not when the underlying association was actually executed on the managed node. To track actual association execution times, use the DescribeAssociationExecutionTargets command or check the association execution history in the Systems Manager console.
+    ///
     pub fn set_execution_time(mut self, input: ::std::option::Option<::aws_smithy_types::DateTime>) -> Self {
         self.execution_time = input;
         self
     }
-    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
+    /// For State Manager associations, this timestamp represents when the compliance status was captured and reported by the Systems Manager service, not when the underlying association was actually executed on the managed node. To track actual association execution times, use the DescribeAssociationExecutionTargets command or check the association execution history in the Systems Manager console.
+    ///
     pub fn get_execution_time(&self) -> &::std::option::Option<::aws_smithy_types::DateTime> {
         &self.execution_time
     }
diff --git a/sdk/ssm/src/types/_compliance_item.rs b/sdk/ssm/src/types/_compliance_item.rs
index 78a81c67c8f8..a05403d7b208 100644
--- a/sdk/ssm/src/types/_compliance_item.rs
+++ b/sdk/ssm/src/types/_compliance_item.rs
@@ -18,7 +18,9 @@ pub struct ComplianceItem {
     pub status: ::std::option::Option<crate::types::ComplianceStatus>,
     /// The severity of the compliance status. Severity can be one of the following: Critical, High, Medium, Low, Informational, Unspecified.
     pub severity: ::std::option::Option<crate::types::ComplianceSeverity>,
-    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// For State Manager associations, the ExecutionTime value represents when the compliance status was captured and aggregated by the Systems Manager service, not necessarily when the underlying association was executed on the managed node. State Manager updates compliance status for all associations on an instance whenever any association executes, which means multiple associations may show the same execution time even if they were executed at different times.
+    ///
     pub execution_summary: ::std::option::Option<crate::types::ComplianceExecutionSummary>,
     /// A "Key": "Value" tag combination for the compliance item.
     pub details: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
@@ -52,7 +54,9 @@ impl ComplianceItem {
     pub fn severity(&self) -> ::std::option::Option<&crate::types::ComplianceSeverity> {
         self.severity.as_ref()
     }
-    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// For State Manager associations, the ExecutionTime value represents when the compliance status was captured and aggregated by the Systems Manager service, not necessarily when the underlying association was executed on the managed node. State Manager updates compliance status for all associations on an instance whenever any association executes, which means multiple associations may show the same execution time even if they were executed at different times.
+    ///
     pub fn execution_summary(&self) -> ::std::option::Option<&crate::types::ComplianceExecutionSummary> {
         self.execution_summary.as_ref()
     }
@@ -181,17 +185,23 @@ impl ComplianceItemBuilder {
     pub fn get_severity(&self) -> &::std::option::Option<crate::types::ComplianceSeverity> {
         &self.severity
     }
-    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// For State Manager associations, the ExecutionTime value represents when the compliance status was captured and aggregated by the Systems Manager service, not necessarily when the underlying association was executed on the managed node. State Manager updates compliance status for all associations on an instance whenever any association executes, which means multiple associations may show the same execution time even if they were executed at different times.
+    ///
     pub fn execution_summary(mut self, input: crate::types::ComplianceExecutionSummary) -> Self {
         self.execution_summary = ::std::option::Option::Some(input);
         self
     }
-    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// For State Manager associations, the ExecutionTime value represents when the compliance status was captured and aggregated by the Systems Manager service, not necessarily when the underlying association was executed on the managed node. State Manager updates compliance status for all associations on an instance whenever any association executes, which means multiple associations may show the same execution time even if they were executed at different times.
+    ///
     pub fn set_execution_summary(mut self, input: ::std::option::Option<crate::types::ComplianceExecutionSummary>) -> Self {
         self.execution_summary = input;
         self
     }
-    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.
+    /// For State Manager associations, the ExecutionTime value represents when the compliance status was captured and aggregated by the Systems Manager service, not necessarily when the underlying association was executed on the managed node. State Manager updates compliance status for all associations on an instance whenever any association executes, which means multiple associations may show the same execution time even if they were executed at different times.
+    ///
     pub fn get_execution_summary(&self) -> &::std::option::Option<crate::types::ComplianceExecutionSummary> {
         &self.execution_summary
     }
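The ExecutionTime caveat added above matters when reading compliance data back out. Here is a rough sketch, not part of this diff, of listing compliance items for a managed node and inspecting each item's execution summary; the resource ID is a placeholder and the dependency assumptions are the same as in the earlier sketch.

```rust
#[tokio::main]
async fn main() -> Result<(), aws_sdk_ssm::Error> {
    let config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
    let client = aws_sdk_ssm::Client::new(&config);

    let resp = client
        .list_compliance_items()
        .resource_ids("i-0a4cd6ceffEXAMPLE") // placeholder managed node ID
        .resource_types("ManagedInstance")
        .send()
        .await?;

    for item in resp.compliance_items() {
        if let Some(summary) = item.execution_summary() {
            // For State Manager associations this timestamp is when compliance was
            // captured and aggregated, not necessarily when the association ran.
            println!(
                "{:?}: status {:?}, reported at {:?}",
                item.compliance_type(),
                item.status(),
                summary.execution_time()
            );
        }
    }

    Ok(())
}
```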

diff --git a/sdk/ssm/src/types/_inventory_filter.rs b/sdk/ssm/src/types/_inventory_filter.rs
index f7ff198e1dcf..b48b52b56d5c 100644
--- a/sdk/ssm/src/types/_inventory_filter.rs
+++ b/sdk/ssm/src/types/_inventory_filter.rs
@@ -1,12 +1,23 @@
 // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
 /// One or more filters. Use a filter to return a more specific list of results.
+/// Example formats for the aws ssm get-inventory command:
+/// --filters Key=AWS:InstanceInformation.AgentType,Values=amazon-ssm-agent,Type=Equal
+/// --filters Key=AWS:InstanceInformation.AgentVersion,Values=3.3.2299.0,Type=Equal
+/// --filters Key=AWS:InstanceInformation.ComputerName,Values=ip-192.0.2.0.us-east-2.compute.internal,Type=Equal
+/// --filters Key=AWS:InstanceInformation.InstanceId,Values=i-0a4cd6ceffEXAMPLE,i-1a2b3c4d5e6EXAMPLE,Type=Equal
+/// --filters Key=AWS:InstanceInformation.InstanceStatus,Values=Active,Type=Equal
+/// --filters Key=AWS:InstanceInformation.IpAddress,Values=198.51.100.0,Type=Equal
+/// --filters Key=AWS:InstanceInformation.PlatformName,Values="Amazon Linux",Type=Equal
+/// --filters Key=AWS:InstanceInformation.PlatformType,Values=Linux,Type=Equal
+/// --filters Key=AWS:InstanceInformation.PlatformVersion,Values=2023,Type=BeginWith
+/// --filters Key=AWS:InstanceInformation.ResourceType,Values=EC2Instance,Type=Equal
 #[non_exhaustive]
 #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
 pub struct InventoryFilter {
     /// The name of the filter key.
     pub key: ::std::string::String,
-    /// Inventory filter values. Example: inventory filter where managed node IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal.
+    /// Inventory filter values.
     pub values: ::std::vec::Vec<::std::string::String>,
     /// The type of filter.
     ///
     /// The Exists filter must be used with aggregators. For more information, see Aggregating inventory data in the Amazon Web Services Systems Manager User Guide.
@@ -19,7 +30,7 @@ impl InventoryFilter {
         use std::ops::Deref;
         self.key.deref()
     }
-    /// Inventory filter values. Example: inventory filter where managed node IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal.
+    /// Inventory filter values.
     pub fn values(&self) -> &[::std::string::String] {
         use std::ops::Deref;
         self.values.deref()
@@ -66,19 +77,19 @@ impl InventoryFilterBuilder {
     ///
     /// To override the contents of this collection use [`set_values`](Self::set_values).
     ///
-    /// Inventory filter values. Example: inventory filter where managed node IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal.
+    /// Inventory filter values.
     pub fn values(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
         let mut v = self.values.unwrap_or_default();
         v.push(input.into());
         self.values = ::std::option::Option::Some(v);
         self
     }
-    /// Inventory filter values. Example: inventory filter where managed node IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal.
+    /// Inventory filter values.
     pub fn set_values(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
         self.values = input;
         self
     }
-    /// Inventory filter values. Example: inventory filter where managed node IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal.
+    /// Inventory filter values.
     pub fn get_values(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
         &self.values
     }
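The new type-level docs show the filters in `aws ssm get-inventory` CLI syntax; with this crate the same filter is built through `InventoryFilter::builder()`. A sketch under the same dependency assumptions as the earlier examples, using one of the documented filter keys:

```rust
use aws_sdk_ssm::types::{InventoryFilter, InventoryQueryOperatorType};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
    let client = aws_sdk_ssm::Client::new(&config);

    // Equivalent of:
    //   --filters Key=AWS:InstanceInformation.PlatformType,Values=Linux,Type=Equal
    let filter = InventoryFilter::builder()
        .key("AWS:InstanceInformation.PlatformType")
        .values("Linux")
        .r#type(InventoryQueryOperatorType::Equal)
        .build()?; // `key` and `values` are required, so build() is fallible

    let resp = client.get_inventory().filters(filter).send().await?;
    println!("matched {} inventory entities", resp.entities().len());

    Ok(())
}
```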

diff --git a/sdk/ssm/src/types/_patch_source.rs b/sdk/ssm/src/types/_patch_source.rs
index dcdd54c6c099..2fc4df3790c7 100644
--- a/sdk/ssm/src/types/_patch_source.rs
+++ b/sdk/ssm/src/types/_patch_source.rs
@@ -8,13 +8,17 @@ pub struct PatchSource {
     pub name: ::std::string::String,
     /// The specific operating system versions a patch repository applies to, such as "Ubuntu16.04", "AmazonLinux2016.09", "RedhatEnterpriseLinux7.2" or "Suse12.7". For lists of supported product values, see PatchFilter.
     pub products: ::std::vec::Vec<::std::string::String>,
-    /// The value of the yum repo configuration. For example:
+    /// The value of the repo configuration.
+    /// Example for yum repositories
     /// [main]
     /// name=MyCustomRepository
    /// baseurl=https://my-custom-repository
-    /// enabled=1
-    /// For information about other options available for your yum repository configuration, see dnf.conf(5).
-    ///
+    /// enabled=1
+    /// For information about other options available for your yum repository configuration, see dnf.conf(5) on the man7.org website.
+    /// Examples for Ubuntu Server and Debian Server
+    /// deb http://security.ubuntu.com/ubuntu jammy main
+    /// deb https://site.example.com/debian distribution component1 component2 component3
+    /// Repo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see jammy (5) sources.list.5.gz on the Ubuntu Server Manuals website and sources.list format on the Debian Wiki.
     pub configuration: ::std::string::String,
 }
 impl PatchSource {
@@ -28,13 +32,17 @@ impl PatchSource {
         use std::ops::Deref;
         self.products.deref()
     }
-    /// The value of the yum repo configuration. For example:
+    /// The value of the repo configuration.
+    /// Example for yum repositories
     /// [main]
     /// name=MyCustomRepository
     /// baseurl=https://my-custom-repository
-    /// enabled=1
-    /// For information about other options available for your yum repository configuration, see dnf.conf(5).
-    ///
+    /// enabled=1
+    /// For information about other options available for your yum repository configuration, see dnf.conf(5) on the man7.org website.
+    /// Examples for Ubuntu Server and Debian Server
+    /// deb http://security.ubuntu.com/ubuntu jammy main
+    /// deb https://site.example.com/debian distribution component1 component2 component3
+    /// Repo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see jammy (5) sources.list.5.gz on the Ubuntu Server Manuals website and sources.list format on the Debian Wiki.
     pub fn configuration(&self) -> &str {
         use std::ops::Deref;
         self.configuration.deref()
@@ -100,36 +108,48 @@ impl PatchSourceBuilder {
     pub fn get_products(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
         &self.products
     }
-    /// The value of the yum repo configuration. For example:
+    /// The value of the repo configuration.
+    /// Example for yum repositories
     /// [main]
     /// name=MyCustomRepository
     /// baseurl=https://my-custom-repository
-    /// enabled=1
-    /// For information about other options available for your yum repository configuration, see dnf.conf(5).
-    ///
+    /// enabled=1
+    /// For information about other options available for your yum repository configuration, see dnf.conf(5) on the man7.org website.
+    /// Examples for Ubuntu Server and Debian Server
+    /// deb http://security.ubuntu.com/ubuntu jammy main
+    /// deb https://site.example.com/debian distribution component1 component2 component3
+    /// Repo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see jammy (5) sources.list.5.gz on the Ubuntu Server Manuals website and sources.list format on the Debian Wiki.
     /// This field is required.
     pub fn configuration(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
         self.configuration = ::std::option::Option::Some(input.into());
         self
     }
-    /// The value of the yum repo configuration. For example:
+    /// The value of the repo configuration.
+    /// Example for yum repositories
     /// [main]
     /// name=MyCustomRepository
     /// baseurl=https://my-custom-repository
-    /// enabled=1
-    /// For information about other options available for your yum repository configuration, see dnf.conf(5).
-    ///
+    /// enabled=1
+    /// For information about other options available for your yum repository configuration, see dnf.conf(5) on the man7.org website.
+    /// Examples for Ubuntu Server and Debian Server
+    /// deb http://security.ubuntu.com/ubuntu jammy main
+    /// deb https://site.example.com/debian distribution component1 component2 component3
+    /// Repo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see jammy (5) sources.list.5.gz on the Ubuntu Server Manuals website and sources.list format on the Debian Wiki.
     pub fn set_configuration(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
         self.configuration = input;
         self
     }
-    /// The value of the yum repo configuration. For example:
+    /// The value of the repo configuration.
+    /// Example for yum repositories
     /// [main]
     /// name=MyCustomRepository
     /// baseurl=https://my-custom-repository
-    /// enabled=1
-    /// For information about other options available for your yum repository configuration, see dnf.conf(5).
-    ///
+    /// enabled=1
+    /// For information about other options available for your yum repository configuration, see dnf.conf(5) on the man7.org website.
+    /// Examples for Ubuntu Server and Debian Server
+    /// deb http://security.ubuntu.com/ubuntu jammy main
+    /// deb https://site.example.com/debian distribution component1 component2 component3
+    /// Repo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see jammy (5) sources.list.5.gz on the Ubuntu Server Manuals website and sources.list format on the Debian Wiki.
     pub fn get_configuration(&self) -> &::std::option::Option<::std::string::String> {
         &self.configuration
     }
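The reworded configuration docs above cover both yum-style and apt-style sources. As a sketch (not part of this diff) of how a `PatchSource` for a custom patch baseline might be assembled with those documented values, where the repository names are placeholders:

```rust
use aws_sdk_ssm::types::PatchSource;

fn example_patch_sources() -> Result<Vec<PatchSource>, Box<dyn std::error::Error>> {
    // yum/dnf style: the configuration string holds the whole repo definition.
    let yum_source = PatchSource::builder()
        .name("MyCustomRepository") // placeholder repository name
        .products("AmazonLinux2016.09")
        .configuration("[main]\nname=MyCustomRepository\nbaseurl=https://my-custom-repository\nenabled=1")
        .build()?; // name, products, and configuration are all required, so build() is fallible

    // Ubuntu Server / Debian Server style: the repo line must be a single `deb ...` entry.
    let apt_source = PatchSource::builder()
        .name("MyUbuntuRepository") // placeholder repository name
        .products("Ubuntu16.04")
        .configuration("deb http://security.ubuntu.com/ubuntu jammy main")
        .build()?;

    Ok(vec![yum_source, apt_source])
}
```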

diff --git a/versions.toml b/versions.toml
index a1c92e36daf3..39c9909f4700 100644
--- a/versions.toml
+++ b/versions.toml
@@ -220,9 +220,9 @@ model_hash = 'fb04c0b18a39db4e89247b06cb918793df9679d5dd1b398444639428f4b7341f'
 
 [crates.aws-sdk-auditmanager]
 category = 'AwsSdk'
-version = '1.78.0'
-source_hash = '0da87cd7c157d2731871c4d2606c7cba388e19c91124fbcffe25bd0190a3e801'
-model_hash = '1998c7711a4cffc97babd562a7fbea50a5ea8fb0b1bae93a6d70a34717649b26'
+version = '1.79.0'
+source_hash = '5b1d1a8a2a78ef54577ee2cb99e31f553863404acd97c33a9d0a613ddc8fa571'
+model_hash = '331f0ff3cefdc2c759cdf7101efb2e8cde1219a48fcc03033ad55ecb9f294bbf'
 
 [crates.aws-sdk-autoscaling]
 category = 'AwsSdk'
@@ -490,9 +490,9 @@ model_hash = '5497d1b84409af6543689f08c00475dd0942c86b4e575186d7c587da07ec97a0'
 
 [crates.aws-sdk-cloudwatchlogs]
 category = 'AwsSdk'
-version = '1.93.0'
-source_hash = 'ca55e214bb7fc20d96118551a6ed1e16a11ce1950ce1bf9fc45781b6d6f134fa'
-model_hash = '4f3dfc9e6a3416532c2345d57909b7c51d115d9a3236adec91978591db6a114d'
+version = '1.94.0'
+source_hash = '5c4ab3ccb7a2e4177dd4bafbd56c56c36193a71d25787187c0adbf4436cbbc74'
+model_hash = 'dcfeffc006a4b6562dee9669b6101c222d6cdcd7ab762a0e3c2c643103776675'
 
 [crates.aws-sdk-codeartifact]
 category = 'AwsSdk'
@@ -1510,9 +1510,9 @@ model_hash = 'c22b529e96850ae5bc26257984b8cef35359d8663dfbcdf482dddf6da37b1ed9'
 
 [crates.aws-sdk-mediaconvert]
 category = 'AwsSdk'
-version = '1.95.0'
-source_hash = '1507b578703f39f10ded7849d996d2c6cf7a502dbe8c07b1fae5986ad3272b15'
-model_hash = '912af2c3adf1c71bff4f1d8ad56bd113d7313f8295c0030b57946356217c18e1'
+version = '1.96.0'
+source_hash = '6d322092661d68a1a8b5b4ed8e0fdeb59a3484ebd0e46b2e3c1c0861d221bdf2'
+model_hash = 'aa3199dedd14fe9506775124f7b69dc64382c3211c950c0c182bf709ca89cc56'
 
 [crates.aws-sdk-medialive]
 category = 'AwsSdk'
@@ -1744,9 +1744,9 @@ model_hash = '4d2917fd5fa3eb1748b771abcc3273a96d4646cd7715aa010eb7e98c766648d6'
 
 [crates.aws-sdk-outposts]
 category = 'AwsSdk'
-version = '1.84.0'
-source_hash = '720092751e00f05c6bbd52f67d5bfad55f30ee673ad6f81f32487a2a0d44ba70'
-model_hash = 'd8e6e8544457696bee179db2ec0345887ac6521ddbf96518ccfdf4846bb2bbfa'
+version = '1.85.0'
+source_hash = '82af21b6db3e351c56559b23b6fb9d6c67eb95435d6ecea6d851bd7f90899ee0'
+model_hash = '22ef3ad2d350a98f65fb620167cdcfbee5cf08e157644941440ff6daca88a405'
 
 [crates.aws-sdk-panorama]
 category = 'AwsSdk'
@@ -2194,9 +2194,9 @@ model_hash = '7ea630c51f0d837fd35192e6934ea08c17d2c67eb785cb84eabe95dc46e27ca7'
 
 [crates.aws-sdk-sesv2]
 category = 'AwsSdk'
-version = '1.87.0'
-source_hash = '274c064e6b2b4294d2a16b1d32fac0a84f2fa5f65aaee69f1c476f33104a92e3'
-model_hash = 'f3aed2f0a555690f40eec1686465157be9e803e82c052df2a6acd001338ecc80'
+version = '1.88.0'
+source_hash = 'dec0316200beb8488aa5a8f8df6de8f5325c72060421c3cd320553ee09e94e2e'
+model_hash = 'b865368edc9691a7cd0b7a5e98ee256420456e61533020a984ae81331fa06232'
 
 [crates.aws-sdk-sfn]
 category = 'AwsSdk'
@@ -2260,9 +2260,9 @@ model_hash = '427510dc9329fdb7107e160387c1051deb1062215c400ecc8f7a2f0d1aee0d61'
 
 [crates.aws-sdk-ssm]
 category = 'AwsSdk'
-version = '1.85.0'
-source_hash = '95a0c4e3e87d375a197f8b90bb6abd82323881a82f09fed09e0373c435b892fa'
-model_hash = '5e477df120d5bdead833ec884d51e19bfcc0b440f266b57708f24b4ad48b9594'
+version = '1.85.1'
+source_hash = 'a42278b45298999f934bd72f6dbca599a0bc68849f58f9c2f9011f68a161ec64'
+model_hash = 'b9eca8522247ba8e63adb5d4e0d4459ed3b273ac5c841f7a82a6c2a4ed5e8b35'
 
 [crates.aws-sdk-ssmcontacts]
 category = 'AwsSdk'
@@ -2677,14 +2677,12 @@ version = '0.0.0'
 source_hash = '7581e2c90ebb2b84dc1a89353651a8f020eae052ef4c14474acb5f86fa15f0e5'
 
 [release]
-tag = 'release-2025-07-17'
+tag = 'release-2025-07-18'
 
 [release.crates]
-aws-sdk-cleanroomsml = '1.78.0'
-aws-sdk-cloudfront = '1.83.1'
-aws-sdk-ec2 = '1.148.0'
-aws-sdk-keyspacesstreams = '1.3.1'
-aws-sdk-mailmanager = '1.59.0'
-aws-sdk-mediaconvert = '1.95.0'
-aws-sdk-sfn = '1.80.1'
-aws-sdk-synthetics = '1.84.0'
+aws-sdk-auditmanager = '1.79.0'
+aws-sdk-cloudwatchlogs = '1.94.0'
+aws-sdk-mediaconvert = '1.96.0'
+aws-sdk-outposts = '1.85.0'
+aws-sdk-sesv2 = '1.88.0'
+aws-sdk-ssm = '1.85.1'