From 6bfa212e955f4ac50891fbabd9c66e1cb38dcaf9 Mon Sep 17 00:00:00 2001 From: AWS SDK Swift Automation Date: Mon, 23 Sep 2024 18:54:24 +0000 Subject: [PATCH] chore: Updates version to 1.0.4 --- Package.version | 2 +- Package.version.next | 2 +- .../Sources/AWSAPIGateway/Models.swift | 8 +- .../AWSAthena/Sources/AWSAthena/Models.swift | 422 +++++++++++++++--- .../Sources/AWSBedrockAgent/Models.swift | 10 +- .../AWSEC2/Sources/AWSEC2/EC2Client.swift | 6 +- .../AWSEC2/Sources/AWSEC2/Models.swift | 36 +- .../Sources/AWSEMRServerless/Models.swift | 70 +++ .../AWSGlue/Sources/AWSGlue/Models.swift | 15 + .../AWSRDS/Sources/AWSRDS/Models.swift | 125 +++--- .../AWSRDS/Sources/AWSRDS/RDSClient.swift | 4 +- .../Sources/AWSResourceExplorer2/Models.swift | 252 +++++++---- .../AWSResourceExplorer2/Paginators.swift | 32 ++ .../ResourceExplorer2Client.swift | 76 +++- 14 files changed, 835 insertions(+), 225 deletions(-) diff --git a/Package.version b/Package.version index e4c0d46e55f..a6a3a43c3a0 100644 --- a/Package.version +++ b/Package.version @@ -1 +1 @@ -1.0.3 \ No newline at end of file +1.0.4 \ No newline at end of file diff --git a/Package.version.next b/Package.version.next index a6a3a43c3a0..1464c521f9e 100644 --- a/Package.version.next +++ b/Package.version.next @@ -1 +1 @@ -1.0.4 \ No newline at end of file +1.0.5 \ No newline at end of file diff --git a/Sources/Services/AWSAPIGateway/Sources/AWSAPIGateway/Models.swift b/Sources/Services/AWSAPIGateway/Sources/AWSAPIGateway/Models.swift index 2a144af0f43..09296cfa77f 100644 --- a/Sources/Services/AWSAPIGateway/Sources/AWSAPIGateway/Models.swift +++ b/Sources/Services/AWSAPIGateway/Sources/AWSAPIGateway/Models.swift @@ -1431,7 +1431,7 @@ public struct CreateDomainNameOutput { public var certificateArn: Swift.String? /// The name of the certificate that will be used by edge-optimized endpoint for this domain name. public var certificateName: Swift.String? - /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. + /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate. public var certificateUploadDate: Foundation.Date? /// The domain name of the Amazon CloudFront distribution associated with this custom domain name for an edge-optimized endpoint. You set up this association when adding a DNS record pointing the custom domain name to this distribution name. For more information about CloudFront distributions, see the Amazon CloudFront documentation. public var distributionDomainName: Swift.String? @@ -4080,7 +4080,7 @@ public struct GetDomainNameOutput { public var certificateArn: Swift.String? /// The name of the certificate that will be used by edge-optimized endpoint for this domain name. public var certificateName: Swift.String? - /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. + /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate. public var certificateUploadDate: Foundation.Date? /// The domain name of the Amazon CloudFront distribution associated with this custom domain name for an edge-optimized endpoint. You set up this association when adding a DNS record pointing the custom domain name to this distribution name. 
For more information about CloudFront distributions, see the Amazon CloudFront documentation. public var distributionDomainName: Swift.String? @@ -4175,7 +4175,7 @@ extension APIGatewayClientTypes { public var certificateArn: Swift.String? /// The name of the certificate that will be used by edge-optimized endpoint for this domain name. public var certificateName: Swift.String? - /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. + /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate. public var certificateUploadDate: Foundation.Date? /// The domain name of the Amazon CloudFront distribution associated with this custom domain name for an edge-optimized endpoint. You set up this association when adding a DNS record pointing the custom domain name to this distribution name. For more information about CloudFront distributions, see the Amazon CloudFront documentation. public var distributionDomainName: Swift.String? @@ -7599,7 +7599,7 @@ public struct UpdateDomainNameOutput { public var certificateArn: Swift.String? /// The name of the certificate that will be used by edge-optimized endpoint for this domain name. public var certificateName: Swift.String? - /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. + /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate. public var certificateUploadDate: Foundation.Date? /// The domain name of the Amazon CloudFront distribution associated with this custom domain name for an edge-optimized endpoint. You set up this association when adding a DNS record pointing the custom domain name to this distribution name. For more information about CloudFront distributions, see the Amazon CloudFront documentation. public var distributionDomainName: Swift.String? diff --git a/Sources/Services/AWSAthena/Sources/AWSAthena/Models.swift b/Sources/Services/AWSAthena/Sources/AWSAthena/Models.swift index c9fe8d0f99d..35bcc7ac9c9 100644 --- a/Sources/Services/AWSAthena/Sources/AWSAthena/Models.swift +++ b/Sources/Services/AWSAthena/Sources/AWSAthena/Models.swift @@ -901,6 +901,7 @@ public struct CreateCapacityReservationOutput { extension AthenaClientTypes { public enum DataCatalogType: Swift.Equatable, Swift.RawRepresentable, Swift.CaseIterable, Swift.Hashable { + case federated case glue case hive case lambda @@ -908,6 +909,7 @@ extension AthenaClientTypes { public static var allCases: [DataCatalogType] { return [ + .federated, .glue, .hive, .lambda @@ -921,6 +923,7 @@ extension AthenaClientTypes { public var rawValue: Swift.String { switch self { + case .federated: return "FEDERATED" case .glue: return "GLUE" case .hive: return "HIVE" case .lambda: return "LAMBDA" @@ -952,10 +955,21 @@ public struct CreateDataCatalogInput { /// * The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id /// /// * The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. 
+ /// + /// + /// + /// + /// * The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection. + /// + /// * connection-arn: + /// + /// * lambda-role-arn (optional): The execution role to use for the Lambda function. If not provided, one is created. + /// + /// * connection-type:MYSQL|REDSHIFT|...., connection-properties:"" For , use escaped JSON text, as in the following example. "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" public var parameters: [Swift.String: Swift.String]? /// A list of comma separated tags to add to the data catalog that is created. public var tags: [AthenaClientTypes.Tag]? - /// The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. + /// The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass. /// This member is required. public var type: AthenaClientTypes.DataCatalogType? @@ -975,9 +989,273 @@ public struct CreateDataCatalogInput { } } +extension AthenaClientTypes { + + public enum ConnectionType: Swift.Equatable, Swift.RawRepresentable, Swift.CaseIterable, Swift.Hashable { + case bigquery + case clouderahive + case clouderaimpala + case cloudwatch + case cloudwatchmetrics + case cmdb + case datalakegen2 + case db2 + case db2as400 + case documentdb + case dynamodb + case googlecloudstorage + case hbase + case hortonworkshive + case msk + case mysql + case neptune + case opensearch + case oracle + case postgresql + case redis + case redshift + case saphana + case snowflake + case sqlserver + case synapse + case teradata + case timestream + case tpcds + case vertica + case sdkUnknown(Swift.String) + + public static var allCases: [ConnectionType] { + return [ + .bigquery, + .clouderahive, + .clouderaimpala, + .cloudwatch, + .cloudwatchmetrics, + .cmdb, + .datalakegen2, + .db2, + .db2as400, + .documentdb, + .dynamodb, + .googlecloudstorage, + .hbase, + .hortonworkshive, + .msk, + .mysql, + .neptune, + .opensearch, + .oracle, + .postgresql, + .redis, + .redshift, + .saphana, + .snowflake, + .sqlserver, + .synapse, + .teradata, + .timestream, + .tpcds, + .vertica + ] + } + + public init?(rawValue: Swift.String) { + let value = Self.allCases.first(where: { $0.rawValue == rawValue }) + self = value ?? 
Self.sdkUnknown(rawValue) + } + + public var rawValue: Swift.String { + switch self { + case .bigquery: return "BIGQUERY" + case .clouderahive: return "CLOUDERAHIVE" + case .clouderaimpala: return "CLOUDERAIMPALA" + case .cloudwatch: return "CLOUDWATCH" + case .cloudwatchmetrics: return "CLOUDWATCHMETRICS" + case .cmdb: return "CMDB" + case .datalakegen2: return "DATALAKEGEN2" + case .db2: return "DB2" + case .db2as400: return "DB2AS400" + case .documentdb: return "DOCUMENTDB" + case .dynamodb: return "DYNAMODB" + case .googlecloudstorage: return "GOOGLECLOUDSTORAGE" + case .hbase: return "HBASE" + case .hortonworkshive: return "HORTONWORKSHIVE" + case .msk: return "MSK" + case .mysql: return "MYSQL" + case .neptune: return "NEPTUNE" + case .opensearch: return "OPENSEARCH" + case .oracle: return "ORACLE" + case .postgresql: return "POSTGRESQL" + case .redis: return "REDIS" + case .redshift: return "REDSHIFT" + case .saphana: return "SAPHANA" + case .snowflake: return "SNOWFLAKE" + case .sqlserver: return "SQLSERVER" + case .synapse: return "SYNAPSE" + case .teradata: return "TERADATA" + case .timestream: return "TIMESTREAM" + case .tpcds: return "TPCDS" + case .vertica: return "VERTICA" + case let .sdkUnknown(s): return s + } + } + } +} + +extension AthenaClientTypes { + + public enum DataCatalogStatus: Swift.Equatable, Swift.RawRepresentable, Swift.CaseIterable, Swift.Hashable { + case createComplete + case createFailed + case createFailedCleanupComplete + case createFailedCleanupFailed + case createFailedCleanupInProgress + case createInProgress + case deleteComplete + case deleteFailed + case deleteInProgress + case sdkUnknown(Swift.String) + + public static var allCases: [DataCatalogStatus] { + return [ + .createComplete, + .createFailed, + .createFailedCleanupComplete, + .createFailedCleanupFailed, + .createFailedCleanupInProgress, + .createInProgress, + .deleteComplete, + .deleteFailed, + .deleteInProgress + ] + } + + public init?(rawValue: Swift.String) { + let value = Self.allCases.first(where: { $0.rawValue == rawValue }) + self = value ?? Self.sdkUnknown(rawValue) + } + + public var rawValue: Swift.String { + switch self { + case .createComplete: return "CREATE_COMPLETE" + case .createFailed: return "CREATE_FAILED" + case .createFailedCleanupComplete: return "CREATE_FAILED_CLEANUP_COMPLETE" + case .createFailedCleanupFailed: return "CREATE_FAILED_CLEANUP_FAILED" + case .createFailedCleanupInProgress: return "CREATE_FAILED_CLEANUP_IN_PROGRESS" + case .createInProgress: return "CREATE_IN_PROGRESS" + case .deleteComplete: return "DELETE_COMPLETE" + case .deleteFailed: return "DELETE_FAILED" + case .deleteInProgress: return "DELETE_IN_PROGRESS" + case let .sdkUnknown(s): return s + } + } + } +} + +extension AthenaClientTypes { + /// Contains information about a data catalog in an Amazon Web Services account. In the Athena console, data catalogs are listed as "data sources" on the Data sources page under the Data source name column. + public struct DataCatalog { + /// The type of connection for a FEDERATED data catalog (for example, REDSHIFT, MYSQL, or SQLSERVER). For information about individual connectors, see [Available data source connectors](https://docs.aws.amazon.com/athena/latest/ug/connectors-available.html). + public var connectionType: AthenaClientTypes.ConnectionType? + /// An optional description of the data catalog. + public var description: Swift.String? + /// Text of the error that occurred during data catalog creation or deletion. + public var error: Swift.String? 
+ /// The name of the data catalog. The catalog name must be unique for the Amazon Web Services account and can use a maximum of 127 alphanumeric, underscore, at sign, or hyphen characters. The remainder of the length constraint of 256 is reserved for use by Athena. + /// This member is required. + public var name: Swift.String? + /// Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type. + /// + /// * For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number + /// + /// * For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. + /// + /// * If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn + /// + /// * If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn + /// + /// + /// + /// + /// * The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue catalog belongs. catalog-id=catalog_id + /// + /// * The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. + /// + /// + /// + /// + /// * The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection. + /// + /// * connection-arn: + /// + /// * connection-type:MYSQL|REDSHIFT|...., connection-properties:"" For , use escaped JSON text, as in the following example. "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" + public var parameters: [Swift.String: Swift.String]? + /// The status of the creation or deletion of the data catalog. + /// + /// * The LAMBDA, GLUE, and HIVE data catalog types are created synchronously. Their status is either CREATE_COMPLETE or CREATE_FAILED. + /// + /// * The FEDERATED data catalog type is created asynchronously. + /// + /// + /// Data catalog creation status: + /// + /// * CREATE_IN_PROGRESS: Federated data catalog creation in progress. + /// + /// * CREATE_COMPLETE: Data catalog creation complete. + /// + /// * CREATE_FAILED: Data catalog could not be created. + /// + /// * CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog creation failed and is being removed. + /// + /// * CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation failed and was removed. + /// + /// * CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation failed but could not be removed. + /// + /// + /// Data catalog deletion status: + /// + /// * DELETE_IN_PROGRESS: Federated data catalog deletion in progress. + /// + /// * DELETE_COMPLETE: Federated data catalog deleted. + /// + /// * DELETE_FAILED: Federated data catalog could not be deleted. 
+ public var status: AthenaClientTypes.DataCatalogStatus? + /// The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass. + /// This member is required. + public var type: AthenaClientTypes.DataCatalogType? + + public init( + connectionType: AthenaClientTypes.ConnectionType? = nil, + description: Swift.String? = nil, + error: Swift.String? = nil, + name: Swift.String? = nil, + parameters: [Swift.String: Swift.String]? = nil, + status: AthenaClientTypes.DataCatalogStatus? = nil, + type: AthenaClientTypes.DataCatalogType? = nil + ) + { + self.connectionType = connectionType + self.description = description + self.error = error + self.name = name + self.parameters = parameters + self.status = status + self.type = type + } + } + +} + public struct CreateDataCatalogOutput { + /// Contains information about a data catalog in an Amazon Web Services account. In the Athena console, data catalogs are listed as "data sources" on the Data sources page under the Data source name column. + public var dataCatalog: AthenaClientTypes.DataCatalog? - public init() { } + public init( + dataCatalog: AthenaClientTypes.DataCatalog? = nil + ) + { + self.dataCatalog = dataCatalog + } } public struct CreateNamedQueryInput { @@ -1371,8 +1649,15 @@ public struct DeleteDataCatalogInput { } public struct DeleteDataCatalogOutput { + /// Contains information about a data catalog in an Amazon Web Services account. In the Athena console, data catalogs are listed as "data sources" on the Data sources page under the Data source name column. + public var dataCatalog: AthenaClientTypes.DataCatalog? - public init() { } + public init( + dataCatalog: AthenaClientTypes.DataCatalog? = nil + ) + { + self.dataCatalog = dataCatalog + } } public struct DeleteNamedQueryInput { @@ -2109,51 +2394,6 @@ public struct GetDataCatalogInput { } } -extension AthenaClientTypes { - /// Contains information about a data catalog in an Amazon Web Services account. In the Athena console, data catalogs are listed as "data sources" on the Data sources page under the Data source name column. - public struct DataCatalog { - /// An optional description of the data catalog. - public var description: Swift.String? - /// The name of the data catalog. The catalog name must be unique for the Amazon Web Services account and can use a maximum of 127 alphanumeric, underscore, at sign, or hyphen characters. The remainder of the length constraint of 256 is reserved for use by Athena. - /// This member is required. - public var name: Swift.String? - /// Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type. - /// - /// * For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number - /// - /// * For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. - /// - /// * If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. 
metadata-function=lambda_arn, record-function=lambda_arn - /// - /// * If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn - /// - /// - /// - /// - /// * The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue catalog belongs. catalog-id=catalog_id - /// - /// * The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. - public var parameters: [Swift.String: Swift.String]? - /// The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. - /// This member is required. - public var type: AthenaClientTypes.DataCatalogType? - - public init( - description: Swift.String? = nil, - name: Swift.String? = nil, - parameters: [Swift.String: Swift.String]? = nil, - type: AthenaClientTypes.DataCatalogType? = nil - ) - { - self.description = description - self.name = name - self.parameters = parameters - self.type = type - } - } - -} - public struct GetDataCatalogOutput { /// The data catalog returned. public var dataCatalog: AthenaClientTypes.DataCatalog? @@ -3248,15 +3488,55 @@ extension AthenaClientTypes { public struct DataCatalogSummary { /// The name of the data catalog. The catalog name is unique for the Amazon Web Services account and can use a maximum of 127 alphanumeric, underscore, at sign, or hyphen characters. The remainder of the length constraint of 256 is reserved for use by Athena. public var catalogName: Swift.String? + /// The type of connection for a FEDERATED data catalog (for example, REDSHIFT, MYSQL, or SQLSERVER). For information about individual connectors, see [Available data source connectors](https://docs.aws.amazon.com/athena/latest/ug/connectors-available.html). + public var connectionType: AthenaClientTypes.ConnectionType? + /// Text of the error that occurred during data catalog creation or deletion. + public var error: Swift.String? + /// The status of the creation or deletion of the data catalog. + /// + /// * The LAMBDA, GLUE, and HIVE data catalog types are created synchronously. Their status is either CREATE_COMPLETE or CREATE_FAILED. + /// + /// * The FEDERATED data catalog type is created asynchronously. + /// + /// + /// Data catalog creation status: + /// + /// * CREATE_IN_PROGRESS: Federated data catalog creation in progress. + /// + /// * CREATE_COMPLETE: Data catalog creation complete. + /// + /// * CREATE_FAILED: Data catalog could not be created. + /// + /// * CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog creation failed and is being removed. + /// + /// * CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation failed and was removed. + /// + /// * CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation failed but could not be removed. + /// + /// + /// Data catalog deletion status: + /// + /// * DELETE_IN_PROGRESS: Federated data catalog deletion in progress. + /// + /// * DELETE_COMPLETE: Federated data catalog deleted. + /// + /// * DELETE_FAILED: Federated data catalog could not be deleted. + public var status: AthenaClientTypes.DataCatalogStatus? /// The data catalog type. public var type: AthenaClientTypes.DataCatalogType? public init( catalogName: Swift.String? = nil, + connectionType: AthenaClientTypes.ConnectionType? = nil, + error: Swift.String? 
= nil, + status: AthenaClientTypes.DataCatalogStatus? = nil, type: AthenaClientTypes.DataCatalogType? = nil ) { self.catalogName = catalogName + self.connectionType = connectionType + self.error = error + self.status = status self.type = type } } @@ -5885,7 +6165,12 @@ extension CreateCapacityReservationOutput { extension CreateDataCatalogOutput { static func httpOutput(from httpResponse: SmithyHTTPAPI.HTTPResponse) async throws -> CreateDataCatalogOutput { - return CreateDataCatalogOutput() + let data = try await httpResponse.data() + let responseReader = try SmithyJSON.Reader.from(data: data) + let reader = responseReader + var value = CreateDataCatalogOutput() + value.dataCatalog = try reader["DataCatalog"].readIfPresent(with: AthenaClientTypes.DataCatalog.read(from:)) + return value } } @@ -5951,7 +6236,12 @@ extension DeleteCapacityReservationOutput { extension DeleteDataCatalogOutput { static func httpOutput(from httpResponse: SmithyHTTPAPI.HTTPResponse) async throws -> DeleteDataCatalogOutput { - return DeleteDataCatalogOutput() + let data = try await httpResponse.data() + let responseReader = try SmithyJSON.Reader.from(data: data) + let reader = responseReader + var value = DeleteDataCatalogOutput() + value.dataCatalog = try reader["DataCatalog"].readIfPresent(with: AthenaClientTypes.DataCatalog.read(from:)) + return value } } @@ -7996,6 +8286,22 @@ extension AthenaClientTypes.UnprocessedQueryExecutionId { } } +extension AthenaClientTypes.DataCatalog { + + static func read(from reader: SmithyJSON.Reader) throws -> AthenaClientTypes.DataCatalog { + guard reader.hasContent else { throw SmithyReadWrite.ReaderError.requiredValueNotPresent } + var value = AthenaClientTypes.DataCatalog() + value.name = try reader["Name"].readIfPresent() ?? "" + value.description = try reader["Description"].readIfPresent() + value.type = try reader["Type"].readIfPresent() ?? .sdkUnknown("") + value.parameters = try reader["Parameters"].readMapIfPresent(valueReadingClosure: SmithyReadWrite.ReadingClosures.readString(from:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) + value.status = try reader["Status"].readIfPresent() + value.connectionType = try reader["ConnectionType"].readIfPresent() + value.error = try reader["Error"].readIfPresent() + return value + } +} + extension AthenaClientTypes.NotebookMetadata { static func read(from reader: SmithyJSON.Reader) throws -> AthenaClientTypes.NotebookMetadata { @@ -8115,19 +8421,6 @@ extension AthenaClientTypes.Database { } } -extension AthenaClientTypes.DataCatalog { - - static func read(from reader: SmithyJSON.Reader) throws -> AthenaClientTypes.DataCatalog { - guard reader.hasContent else { throw SmithyReadWrite.ReaderError.requiredValueNotPresent } - var value = AthenaClientTypes.DataCatalog() - value.name = try reader["Name"].readIfPresent() ?? "" - value.description = try reader["Description"].readIfPresent() - value.type = try reader["Type"].readIfPresent() ?? 
.sdkUnknown("") - value.parameters = try reader["Parameters"].readMapIfPresent(valueReadingClosure: SmithyReadWrite.ReadingClosures.readString(from:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) - return value - } -} - extension AthenaClientTypes.ResultSet { static func read(from reader: SmithyJSON.Reader) throws -> AthenaClientTypes.ResultSet { @@ -8462,6 +8755,9 @@ extension AthenaClientTypes.DataCatalogSummary { var value = AthenaClientTypes.DataCatalogSummary() value.catalogName = try reader["CatalogName"].readIfPresent() value.type = try reader["Type"].readIfPresent() + value.status = try reader["Status"].readIfPresent() + value.connectionType = try reader["ConnectionType"].readIfPresent() + value.error = try reader["Error"].readIfPresent() return value } } diff --git a/Sources/Services/AWSBedrockAgent/Sources/AWSBedrockAgent/Models.swift b/Sources/Services/AWSBedrockAgent/Sources/AWSBedrockAgent/Models.swift index 0d8d970b83b..a0d0c8decac 100644 --- a/Sources/Services/AWSBedrockAgent/Sources/AWSBedrockAgent/Models.swift +++ b/Sources/Services/AWSBedrockAgent/Sources/AWSBedrockAgent/Models.swift @@ -1152,7 +1152,7 @@ extension BedrockAgentClientTypes.PromptConfiguration: Swift.CustomDebugStringCo extension BedrockAgentClientTypes { /// Contains configurations to override prompts in different parts of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html). public struct PromptOverrideConfiguration { - /// The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see [Parser Lambda function in Agents for Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html). + /// The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see [Parser Lambda function in Amazon Bedrock Agents](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html). public var overrideLambda: Swift.String? /// Contains configurations to override a prompt template in one part of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html). /// This member is required. @@ -1637,7 +1637,7 @@ public struct CreateAgentInput { public var customerEncryptionKeyArn: Swift.String? /// A description of the agent. public var description: Swift.String? - /// The foundation model to be used for orchestration by the agent you create. + /// The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create. public var foundationModel: Swift.String? /// The unique Guardrail configuration assigned to the agent when it is created. public var guardrailConfiguration: BedrockAgentClientTypes.GuardrailConfiguration? @@ -4021,7 +4021,7 @@ extension BedrockAgentClientTypes { /// The unique identifier of the knowledge base to query. /// This member is required. public var knowledgeBaseId: Swift.String? - /// The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. 
+ /// The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. public var modelId: Swift.String? public init( @@ -4210,7 +4210,7 @@ extension BedrockAgentClientTypes { public struct PromptFlowNodeInlineConfiguration { /// Contains inference configurations for the prompt. public var inferenceConfiguration: BedrockAgentClientTypes.PromptInferenceConfiguration? - /// The unique identifier of the model to run inference with. + /// The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to run inference with. /// This member is required. public var modelId: Swift.String? /// Contains a prompt and variables in the prompt that can be replaced with values at runtime. @@ -7297,7 +7297,7 @@ extension BedrockAgentClientTypes { public var inferenceConfiguration: BedrockAgentClientTypes.PromptInferenceConfiguration? /// An array of objects, each containing a key-value pair that defines a metadata tag and value to attach to a prompt variant. For more information, see [Create a prompt using Prompt management](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management-create.html). public var metadata: [BedrockAgentClientTypes.PromptMetadataEntry]? - /// The unique identifier of the model with which to run inference on the prompt. + /// The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) with which to run inference on the prompt. public var modelId: Swift.String? /// The name of the prompt variant. /// This member is required. diff --git a/Sources/Services/AWSEC2/Sources/AWSEC2/EC2Client.swift b/Sources/Services/AWSEC2/Sources/AWSEC2/EC2Client.swift index 3b77646241c..76fcceb97fe 100644 --- a/Sources/Services/AWSEC2/Sources/AWSEC2/EC2Client.swift +++ b/Sources/Services/AWSEC2/Sources/AWSEC2/EC2Client.swift @@ -3035,7 +3035,7 @@ extension EC2Client { /// Performs the `CancelConversionTask` operation on the `AmazonEC2` service. /// - /// Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception. For more information, see [Importing a Virtual Machine Using the Amazon EC2 CLI](https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ec2-cli-vmimport-export.html). + /// Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception. /// /// - Parameter CancelConversionTaskInput : [no documentation found] /// @@ -31355,7 +31355,7 @@ extension EC2Client { /// Performs the `ImportInstance` operation on the `AmazonEC2` service. /// - /// We recommend that you use the [ImportImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportImage.html) API. 
For more information, see [Importing a VM as an image using VM Import/Export](https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html) in the VM Import/Export User Guide. Creates an import instance task using metadata from the specified disk image. This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see [Importing a VM to Amazon EC2](https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#UsingVirtualMachinesinAmazonEC2) in the Amazon EC2 CLI Reference PDF file. This API action supports only single-volume VMs. To import multi-volume VMs, use [ImportImage] instead. For information about the import manifest referenced by this API action, see [VM Import Manifest](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). + /// We recommend that you use the [ImportImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportImage.html) API instead. For more information, see [Importing a VM as an image using VM Import/Export](https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html) in the VM Import/Export User Guide. Creates an import instance task using metadata from the specified disk image. This API action supports only single-volume VMs. To import multi-volume VMs, use [ImportImage] instead. For information about the import manifest referenced by this API action, see [VM Import Manifest](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). This API action is not supported by the Command Line Interface (CLI). /// /// - Parameter ImportInstanceInput : [no documentation found] /// @@ -31547,7 +31547,7 @@ extension EC2Client { /// Performs the `ImportVolume` operation on the `AmazonEC2` service. /// - /// Creates an import volume task using metadata from the specified disk image. This API action supports only single-volume VMs. To import multi-volume VMs, use [ImportImage] instead. To import a disk to a snapshot, use [ImportSnapshot] instead. This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see [Importing Disks to Amazon EBS](https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#importing-your-volumes-into-amazon-ebs) in the Amazon EC2 CLI Reference PDF file. For information about the import manifest referenced by this API action, see [VM Import Manifest](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). + /// This API action supports only single-volume VMs. To import multi-volume VMs, use [ImportImage] instead. To import a disk to a snapshot, use [ImportSnapshot] instead. Creates an import volume task using metadata from the specified disk image. For information about the import manifest referenced by this API action, see [VM Import Manifest](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). This API action is not supported by the Command Line Interface (CLI). 
/// /// - Parameter ImportVolumeInput : [no documentation found] /// diff --git a/Sources/Services/AWSEC2/Sources/AWSEC2/Models.swift b/Sources/Services/AWSEC2/Sources/AWSEC2/Models.swift index 9c4496c807e..05efb4dc306 100644 --- a/Sources/Services/AWSEC2/Sources/AWSEC2/Models.swift +++ b/Sources/Services/AWSEC2/Sources/AWSEC2/Models.swift @@ -9561,6 +9561,14 @@ extension EC2ClientTypes { case g64xlarge case g68xlarge case g6Xlarge + case g6e12xlarge + case g6e16xlarge + case g6e24xlarge + case g6e2xlarge + case g6e48xlarge + case g6e4xlarge + case g6e8xlarge + case g6eXlarge case gr64xlarge case gr68xlarge case h116xlarge @@ -10385,6 +10393,14 @@ extension EC2ClientTypes { .g64xlarge, .g68xlarge, .g6Xlarge, + .g6e12xlarge, + .g6e16xlarge, + .g6e24xlarge, + .g6e2xlarge, + .g6e48xlarge, + .g6e4xlarge, + .g6e8xlarge, + .g6eXlarge, .gr64xlarge, .gr68xlarge, .h116xlarge, @@ -11215,6 +11231,14 @@ extension EC2ClientTypes { case .g64xlarge: return "g6.4xlarge" case .g68xlarge: return "g6.8xlarge" case .g6Xlarge: return "g6.xlarge" + case .g6e12xlarge: return "g6e.12xlarge" + case .g6e16xlarge: return "g6e.16xlarge" + case .g6e24xlarge: return "g6e.24xlarge" + case .g6e2xlarge: return "g6e.2xlarge" + case .g6e48xlarge: return "g6e.48xlarge" + case .g6e4xlarge: return "g6e.4xlarge" + case .g6e8xlarge: return "g6e.8xlarge" + case .g6eXlarge: return "g6e.xlarge" case .gr64xlarge: return "gr6.4xlarge" case .gr68xlarge: return "gr6.8xlarge" case .h116xlarge: return "h1.16xlarge" @@ -14214,12 +14238,16 @@ extension EC2ClientTypes { extension EC2ClientTypes { public enum FleetCapacityReservationUsageStrategy: Swift.Equatable, Swift.RawRepresentable, Swift.CaseIterable, Swift.Hashable { + case `none` case useCapacityReservationsFirst + case useCapacityReservationsOnly case sdkUnknown(Swift.String) public static var allCases: [FleetCapacityReservationUsageStrategy] { return [ - .useCapacityReservationsFirst + .none, + .useCapacityReservationsFirst, + .useCapacityReservationsOnly ] } @@ -14230,7 +14258,9 @@ extension EC2ClientTypes { public var rawValue: Swift.String { switch self { + case .none: return "none" case .useCapacityReservationsFirst: return "use-capacity-reservations-first" + case .useCapacityReservationsOnly: return "use-capacity-reservations-only" case let .sdkUnknown(s): return s } } @@ -33134,10 +33164,8 @@ public struct DescribeCapacityBlockOfferingsInput { /// The latest end date for the Capacity Block offering. public var endDateRange: Foundation.Date? /// The number of instances for which to reserve capacity. - /// This member is required. public var instanceCount: Swift.Int? /// The type of instance for which the Capacity Block offering reserves capacity. - /// This member is required. public var instanceType: Swift.String? /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see [Pagination](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). public var maxResults: Swift.Int? @@ -38459,7 +38487,7 @@ public struct DescribeImportSnapshotTasksInput { extension EC2ClientTypes { /// Details about the import snapshot task. public struct SnapshotTaskDetail { - /// The description of the snapshot. + /// The description of the disk image being imported. public var description: Swift.String? /// The size of the disk in the snapshot, in GiB. public var diskImageSize: Swift.Double? 
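The Athena changes earlier in this patch introduce a FEDERATED data catalog type, a DataCatalogStatus enum, a ConnectionType enum, and a DataCatalog payload on the create and delete outputs. The sketch below shows how that new surface might be consumed from the updated SDK. It is a minimal, hedged example: AthenaClient(region:) and createDataCatalog(input:) are assumed from the existing generated client surface (they are not part of this diff), and the catalog name, connection type, and connection properties are placeholders.

import AWSAthena

// Hedged sketch of the new FEDERATED data catalog surface added in this patch.
// Only the types visible in the diff (DataCatalogType.federated, DataCatalogStatus,
// CreateDataCatalogOutput.dataCatalog, DataCatalog.error/status) are confirmed here;
// the client initializer and operation method are assumed from the existing SDK.
func createFederatedCatalog() async throws {
    let client = try AthenaClient(region: "us-east-1")   // assumed client initializer

    let input = CreateDataCatalogInput(
        name: "my_federated_catalog",                     // placeholder catalog name
        parameters: [
            // New connection created by Athena; values are placeholders.
            "connection-type": "MYSQL",
            // connection-properties takes escaped JSON text, per the doc comment in this patch.
            "connection-properties": "{\"host\":\"example-host\",\"port\":\"3306\"}"
        ],
        type: .federated                                  // enum case added in this patch
    )

    let output = try await client.createDataCatalog(input: input)

    // CreateDataCatalogOutput now carries the DataCatalog itself. FEDERATED catalogs
    // are created asynchronously, so the initial status is typically CREATE_IN_PROGRESS.
    if let catalog = output.dataCatalog {
        print("Catalog \(catalog.name ?? "?") type=\(catalog.type?.rawValue ?? "?") status=\(catalog.status?.rawValue ?? "unknown")")
        if catalog.status == .createFailed {
            print("Creation failed: \(catalog.error ?? "no error text")")
        }
    }
}

Because FEDERATED catalog creation is asynchronous, a caller would likely poll GetDataCatalog until the returned status reaches CREATE_COMPLETE or one of the CREATE_FAILED states.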
diff --git a/Sources/Services/AWSEMRServerless/Sources/AWSEMRServerless/Models.swift b/Sources/Services/AWSEMRServerless/Sources/AWSEMRServerless/Models.swift index ac13de64f97..0d1a319ec8f 100644 --- a/Sources/Services/AWSEMRServerless/Sources/AWSEMRServerless/Models.swift +++ b/Sources/Services/AWSEMRServerless/Sources/AWSEMRServerless/Models.swift @@ -354,6 +354,26 @@ extension EMRServerlessClientTypes { } +extension EMRServerlessClientTypes { + /// The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. + public struct SchedulerConfiguration { + /// The maximum concurrent job runs on this application. If scheduler configuration is enabled on your application, the default value is 15. The valid range is 1 to 1000. + public var maxConcurrentRuns: Swift.Int? + /// The maximum duration in minutes for the job in QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720. + public var queueTimeoutMinutes: Swift.Int? + + public init( + maxConcurrentRuns: Swift.Int? = nil, + queueTimeoutMinutes: Swift.Int? = nil + ) + { + self.maxConcurrentRuns = maxConcurrentRuns + self.queueTimeoutMinutes = queueTimeoutMinutes + } + } + +} + extension EMRServerlessClientTypes { public enum ApplicationState: Swift.Equatable, Swift.RawRepresentable, Swift.CaseIterable, Swift.Hashable { @@ -1000,6 +1020,7 @@ extension EMRServerlessClientTypes { case cancelling case failed case pending + case queued case running case scheduled case submitted @@ -1012,6 +1033,7 @@ extension EMRServerlessClientTypes { .cancelling, .failed, .pending, + .queued, .running, .scheduled, .submitted, @@ -1030,6 +1052,7 @@ extension EMRServerlessClientTypes { case .cancelling: return "CANCELLING" case .failed: return "FAILED" case .pending: return "PENDING" + case .queued: return "QUEUED" case .running: return "RUNNING" case .scheduled: return "SCHEDULED" case .submitted: return "SUBMITTED" @@ -1491,6 +1514,8 @@ extension EMRServerlessClientTypes { public var releaseLabel: Swift.String? /// The [Configuration](https://docs.aws.amazon.com/emr-serverless/latest/APIReference/API_Configuration.html) specifications of an application. Each configuration consists of a classification and properties. You use this parameter when creating or updating an application. To see the runtimeConfiguration object of an application, run the [GetApplication](https://docs.aws.amazon.com/emr-serverless/latest/APIReference/API_GetApplication.html) API operation. public var runtimeConfiguration: [EMRServerlessClientTypes.Configuration]? + /// The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. + public var schedulerConfiguration: EMRServerlessClientTypes.SchedulerConfiguration? /// The state of the application. /// This member is required. public var state: EMRServerlessClientTypes.ApplicationState? @@ -1523,6 +1548,7 @@ extension EMRServerlessClientTypes { networkConfiguration: EMRServerlessClientTypes.NetworkConfiguration? = nil, releaseLabel: Swift.String? = nil, runtimeConfiguration: [EMRServerlessClientTypes.Configuration]? = nil, + schedulerConfiguration: EMRServerlessClientTypes.SchedulerConfiguration? = nil, state: EMRServerlessClientTypes.ApplicationState? = nil, stateDetails: Swift.String? = nil, tags: [Swift.String: Swift.String]? 
= nil, @@ -1546,6 +1572,7 @@ extension EMRServerlessClientTypes { self.networkConfiguration = networkConfiguration self.releaseLabel = releaseLabel self.runtimeConfiguration = runtimeConfiguration + self.schedulerConfiguration = schedulerConfiguration self.state = state self.stateDetails = stateDetails self.tags = tags @@ -1606,6 +1633,8 @@ public struct CreateApplicationInput { public var releaseLabel: Swift.String? /// The [Configuration](https://docs.aws.amazon.com/emr-serverless/latest/APIReference/API_Configuration.html) specifications to use when creating an application. Each configuration consists of a classification and properties. This configuration is applied to all the job runs submitted under the application. public var runtimeConfiguration: [EMRServerlessClientTypes.Configuration]? + /// The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. + public var schedulerConfiguration: EMRServerlessClientTypes.SchedulerConfiguration? /// The tags assigned to the application. public var tags: [Swift.String: Swift.String]? /// The type of application you want to start, such as Spark or Hive. @@ -1628,6 +1657,7 @@ public struct CreateApplicationInput { networkConfiguration: EMRServerlessClientTypes.NetworkConfiguration? = nil, releaseLabel: Swift.String? = nil, runtimeConfiguration: [EMRServerlessClientTypes.Configuration]? = nil, + schedulerConfiguration: EMRServerlessClientTypes.SchedulerConfiguration? = nil, tags: [Swift.String: Swift.String]? = nil, type: Swift.String? = nil, workerTypeSpecifications: [Swift.String: EMRServerlessClientTypes.WorkerTypeSpecificationInput]? = nil @@ -1646,6 +1676,7 @@ public struct CreateApplicationInput { self.networkConfiguration = networkConfiguration self.releaseLabel = releaseLabel self.runtimeConfiguration = runtimeConfiguration + self.schedulerConfiguration = schedulerConfiguration self.tags = tags self.type = type self.workerTypeSpecifications = workerTypeSpecifications @@ -1681,6 +1712,8 @@ public struct UpdateApplicationInput { public var releaseLabel: Swift.String? /// The [Configuration](https://docs.aws.amazon.com/emr-serverless/latest/APIReference/API_Configuration.html) specifications to use when updating an application. Each configuration consists of a classification and properties. This configuration is applied across all the job runs submitted under the application. public var runtimeConfiguration: [EMRServerlessClientTypes.Configuration]? + /// The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. + public var schedulerConfiguration: EMRServerlessClientTypes.SchedulerConfiguration? /// The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark applications and HiveDriver and TezTask for Hive applications. You can either set image details in this parameter for each worker type, or in imageConfiguration for all worker types. public var workerTypeSpecifications: [Swift.String: EMRServerlessClientTypes.WorkerTypeSpecificationInput]? @@ -1698,6 +1731,7 @@ public struct UpdateApplicationInput { networkConfiguration: EMRServerlessClientTypes.NetworkConfiguration? = nil, releaseLabel: Swift.String? = nil, runtimeConfiguration: [EMRServerlessClientTypes.Configuration]? 
= nil, + schedulerConfiguration: EMRServerlessClientTypes.SchedulerConfiguration? = nil, workerTypeSpecifications: [Swift.String: EMRServerlessClientTypes.WorkerTypeSpecificationInput]? = nil ) { @@ -1714,6 +1748,7 @@ public struct UpdateApplicationInput { self.networkConfiguration = networkConfiguration self.releaseLabel = releaseLabel self.runtimeConfiguration = runtimeConfiguration + self.schedulerConfiguration = schedulerConfiguration self.workerTypeSpecifications = workerTypeSpecifications } } @@ -1743,6 +1778,8 @@ extension EMRServerlessClientTypes { /// The user who created the job run. /// This member is required. public var createdBy: Swift.String? + /// The date and time when the job was terminated. + public var endedAt: Foundation.Date? /// The execution role ARN of the job run. /// This member is required. public var executionRole: Swift.String? @@ -1760,11 +1797,15 @@ extension EMRServerlessClientTypes { public var name: Swift.String? /// The network configuration for customer VPC connectivity. public var networkConfiguration: EMRServerlessClientTypes.NetworkConfiguration? + /// The total time for a job in the QUEUED state in milliseconds. + public var queuedDurationMilliseconds: Swift.Int? /// The Amazon EMR release associated with the application your job is running on. /// This member is required. public var releaseLabel: Swift.String? /// The retry policy of the job run. public var retryPolicy: EMRServerlessClientTypes.RetryPolicy? + /// The date and time when the job moved to the RUNNING state. + public var startedAt: Foundation.Date? /// The state of the job run. /// This member is required. public var state: EMRServerlessClientTypes.JobRunState? @@ -1791,6 +1832,7 @@ extension EMRServerlessClientTypes { configurationOverrides: EMRServerlessClientTypes.ConfigurationOverrides? = nil, createdAt: Foundation.Date? = nil, createdBy: Swift.String? = nil, + endedAt: Foundation.Date? = nil, executionRole: Swift.String? = nil, executionTimeoutMinutes: Swift.Int? = 0, jobDriver: EMRServerlessClientTypes.JobDriver? = nil, @@ -1798,8 +1840,10 @@ extension EMRServerlessClientTypes { mode: EMRServerlessClientTypes.JobRunMode? = nil, name: Swift.String? = nil, networkConfiguration: EMRServerlessClientTypes.NetworkConfiguration? = nil, + queuedDurationMilliseconds: Swift.Int? = nil, releaseLabel: Swift.String? = nil, retryPolicy: EMRServerlessClientTypes.RetryPolicy? = nil, + startedAt: Foundation.Date? = nil, state: EMRServerlessClientTypes.JobRunState? = nil, stateDetails: Swift.String? = nil, tags: [Swift.String: Swift.String]? 
= nil, @@ -1817,6 +1861,7 @@ extension EMRServerlessClientTypes { self.configurationOverrides = configurationOverrides self.createdAt = createdAt self.createdBy = createdBy + self.endedAt = endedAt self.executionRole = executionRole self.executionTimeoutMinutes = executionTimeoutMinutes self.jobDriver = jobDriver @@ -1824,8 +1869,10 @@ extension EMRServerlessClientTypes { self.mode = mode self.name = name self.networkConfiguration = networkConfiguration + self.queuedDurationMilliseconds = queuedDurationMilliseconds self.releaseLabel = releaseLabel self.retryPolicy = retryPolicy + self.startedAt = startedAt self.state = state self.stateDetails = stateDetails self.tags = tags @@ -2222,6 +2269,7 @@ extension CreateApplicationInput { try writer["networkConfiguration"].write(value.networkConfiguration, with: EMRServerlessClientTypes.NetworkConfiguration.write(value:to:)) try writer["releaseLabel"].write(value.releaseLabel) try writer["runtimeConfiguration"].writeList(value.runtimeConfiguration, memberWritingClosure: EMRServerlessClientTypes.Configuration.write(value:to:), memberNodeInfo: "member", isFlattened: false) + try writer["schedulerConfiguration"].write(value.schedulerConfiguration, with: EMRServerlessClientTypes.SchedulerConfiguration.write(value:to:)) try writer["tags"].writeMap(value.tags, valueWritingClosure: SmithyReadWrite.WritingClosures.writeString(value:to:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) try writer["type"].write(value.type) try writer["workerTypeSpecifications"].writeMap(value.workerTypeSpecifications, valueWritingClosure: EMRServerlessClientTypes.WorkerTypeSpecificationInput.write(value:to:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) @@ -2268,6 +2316,7 @@ extension UpdateApplicationInput { try writer["networkConfiguration"].write(value.networkConfiguration, with: EMRServerlessClientTypes.NetworkConfiguration.write(value:to:)) try writer["releaseLabel"].write(value.releaseLabel) try writer["runtimeConfiguration"].writeList(value.runtimeConfiguration, memberWritingClosure: EMRServerlessClientTypes.Configuration.write(value:to:), memberNodeInfo: "member", isFlattened: false) + try writer["schedulerConfiguration"].write(value.schedulerConfiguration, with: EMRServerlessClientTypes.SchedulerConfiguration.write(value:to:)) try writer["workerTypeSpecifications"].writeMap(value.workerTypeSpecifications, valueWritingClosure: EMRServerlessClientTypes.WorkerTypeSpecificationInput.write(value:to:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) } } @@ -2795,6 +2844,24 @@ extension EMRServerlessClientTypes.Application { value.runtimeConfiguration = try reader["runtimeConfiguration"].readListIfPresent(memberReadingClosure: EMRServerlessClientTypes.Configuration.read(from:), memberNodeInfo: "member", isFlattened: false) value.monitoringConfiguration = try reader["monitoringConfiguration"].readIfPresent(with: EMRServerlessClientTypes.MonitoringConfiguration.read(from:)) value.interactiveConfiguration = try reader["interactiveConfiguration"].readIfPresent(with: EMRServerlessClientTypes.InteractiveConfiguration.read(from:)) + value.schedulerConfiguration = try reader["schedulerConfiguration"].readIfPresent(with: EMRServerlessClientTypes.SchedulerConfiguration.read(from:)) + return value + } +} + +extension EMRServerlessClientTypes.SchedulerConfiguration { + + static func write(value: EMRServerlessClientTypes.SchedulerConfiguration?, to writer: SmithyJSON.Writer) throws { + guard let value else { return } + try 
writer["maxConcurrentRuns"].write(value.maxConcurrentRuns) + try writer["queueTimeoutMinutes"].write(value.queueTimeoutMinutes) + } + + static func read(from reader: SmithyJSON.Reader) throws -> EMRServerlessClientTypes.SchedulerConfiguration { + guard reader.hasContent else { throw SmithyReadWrite.ReaderError.requiredValueNotPresent } + var value = EMRServerlessClientTypes.SchedulerConfiguration() + value.queueTimeoutMinutes = try reader["queueTimeoutMinutes"].readIfPresent() + value.maxConcurrentRuns = try reader["maxConcurrentRuns"].readIfPresent() return value } } @@ -3084,6 +3151,9 @@ extension EMRServerlessClientTypes.JobRun { value.attempt = try reader["attempt"].readIfPresent() value.attemptCreatedAt = try reader["attemptCreatedAt"].readTimestampIfPresent(format: SmithyTimestamps.TimestampFormat.epochSeconds) value.attemptUpdatedAt = try reader["attemptUpdatedAt"].readTimestampIfPresent(format: SmithyTimestamps.TimestampFormat.epochSeconds) + value.startedAt = try reader["startedAt"].readTimestampIfPresent(format: SmithyTimestamps.TimestampFormat.epochSeconds) + value.endedAt = try reader["endedAt"].readTimestampIfPresent(format: SmithyTimestamps.TimestampFormat.epochSeconds) + value.queuedDurationMilliseconds = try reader["queuedDurationMilliseconds"].readIfPresent() return value } } diff --git a/Sources/Services/AWSGlue/Sources/AWSGlue/Models.swift b/Sources/Services/AWSGlue/Sources/AWSGlue/Models.swift index 7914e229c48..47cbc57cbb0 100644 --- a/Sources/Services/AWSGlue/Sources/AWSGlue/Models.swift +++ b/Sources/Services/AWSGlue/Sources/AWSGlue/Models.swift @@ -851,6 +851,11 @@ extension GlueClientTypes { } +extension GlueClientTypes.AuthorizationCodeProperties: Swift.CustomDebugStringConvertible { + public var debugDescription: Swift.String { + "AuthorizationCodeProperties(redirectUri: \(Swift.String(describing: redirectUri)), authorizationCode: \"CONTENT_REDACTED\")"} +} + extension GlueClientTypes { /// A structure containing properties for OAuth2 in the CreateConnection request. public struct OAuth2PropertiesInput { @@ -10249,6 +10254,8 @@ extension GlueClientTypes { extension GlueClientTypes { /// A structure that is used to specify a connection to create or update. public struct ConnectionInput { + /// This field is not currently used. + public var athenaProperties: [Swift.String: Swift.String]? /// The authentication properties of the connection. Used for a Salesforce connection. public var authenticationConfiguration: GlueClientTypes.AuthenticationConfigurationInput? /// These key-value pairs define parameters for the connection. @@ -10334,6 +10341,7 @@ extension GlueClientTypes { public var validateCredentials: Swift.Bool public init( + athenaProperties: [Swift.String: Swift.String]? = nil, authenticationConfiguration: GlueClientTypes.AuthenticationConfigurationInput? = nil, connectionProperties: [Swift.String: Swift.String]? = nil, connectionType: GlueClientTypes.ConnectionType? = nil, @@ -10344,6 +10352,7 @@ extension GlueClientTypes { validateCredentials: Swift.Bool = false ) { + self.athenaProperties = athenaProperties self.authenticationConfiguration = authenticationConfiguration self.connectionProperties = connectionProperties self.connectionType = connectionType @@ -15081,6 +15090,8 @@ public struct GetConnectionInput { extension GlueClientTypes { /// Defines a connection to a data source. public struct Connection { + /// This field is not currently used. + public var athenaProperties: [Swift.String: Swift.String]? 
/// The authentication properties of the connection. public var authenticationConfiguration: GlueClientTypes.AuthenticationConfiguration? /// These key-value pairs define parameters for the connection: @@ -15203,6 +15214,7 @@ extension GlueClientTypes { public var statusReason: Swift.String? public init( + athenaProperties: [Swift.String: Swift.String]? = nil, authenticationConfiguration: GlueClientTypes.AuthenticationConfiguration? = nil, connectionProperties: [Swift.String: Swift.String]? = nil, connectionType: GlueClientTypes.ConnectionType? = nil, @@ -15218,6 +15230,7 @@ extension GlueClientTypes { statusReason: Swift.String? = nil ) { + self.athenaProperties = athenaProperties self.authenticationConfiguration = authenticationConfiguration self.connectionProperties = connectionProperties self.connectionType = connectionType @@ -41159,6 +41172,7 @@ extension GlueClientTypes.Connection { value.connectionType = try reader["ConnectionType"].readIfPresent() value.matchCriteria = try reader["MatchCriteria"].readListIfPresent(memberReadingClosure: SmithyReadWrite.ReadingClosures.readString(from:), memberNodeInfo: "member", isFlattened: false) value.connectionProperties = try reader["ConnectionProperties"].readMapIfPresent(valueReadingClosure: SmithyReadWrite.ReadingClosures.readString(from:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) + value.athenaProperties = try reader["AthenaProperties"].readMapIfPresent(valueReadingClosure: SmithyReadWrite.ReadingClosures.readString(from:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) value.physicalConnectionRequirements = try reader["PhysicalConnectionRequirements"].readIfPresent(with: GlueClientTypes.PhysicalConnectionRequirements.read(from:)) value.creationTime = try reader["CreationTime"].readTimestampIfPresent(format: SmithyTimestamps.TimestampFormat.epochSeconds) value.lastUpdatedTime = try reader["LastUpdatedTime"].readTimestampIfPresent(format: SmithyTimestamps.TimestampFormat.epochSeconds) @@ -42513,6 +42527,7 @@ extension GlueClientTypes.ConnectionInput { static func write(value: GlueClientTypes.ConnectionInput?, to writer: SmithyJSON.Writer) throws { guard let value else { return } + try writer["AthenaProperties"].writeMap(value.athenaProperties, valueWritingClosure: SmithyReadWrite.WritingClosures.writeString(value:to:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) try writer["AuthenticationConfiguration"].write(value.authenticationConfiguration, with: GlueClientTypes.AuthenticationConfigurationInput.write(value:to:)) try writer["ConnectionProperties"].writeMap(value.connectionProperties, valueWritingClosure: SmithyReadWrite.WritingClosures.writeString(value:to:), keyNodeInfo: "key", valueNodeInfo: "value", isFlattened: false) try writer["ConnectionType"].write(value.connectionType) diff --git a/Sources/Services/AWSRDS/Sources/AWSRDS/Models.swift b/Sources/Services/AWSRDS/Sources/AWSRDS/Models.swift index 5d01e86332f..e24e17ebaa9 100644 --- a/Sources/Services/AWSRDS/Sources/AWSRDS/Models.swift +++ b/Sources/Services/AWSRDS/Sources/AWSRDS/Models.swift @@ -7019,7 +7019,7 @@ public struct CreateDBInstanceReadReplicaInput { /// The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. /// This member is required. public var dbInstanceIdentifier: Swift.String? - /// The name of the DB parameter group to associate with this DB instance. 
If you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: + /// The name of the DB parameter group to associate with this read replica DB instance. For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: /// /// * Must be 1 to 255 letters, numbers, or hyphens. /// @@ -8184,30 +8184,6 @@ public struct DBShardGroupAlreadyExistsFault: ClientRuntime.ModeledError, AWSCli } } -/// The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs). -public struct InvalidMaxAcuFault: ClientRuntime.ModeledError, AWSClientRuntime.AWSServiceError, ClientRuntime.HTTPError, Swift.Error { - - public struct Properties { - public internal(set) var message: Swift.String? = nil - } - - public internal(set) var properties = Properties() - public static var typeName: Swift.String { "InvalidMaxAcu" } - public static var fault: ClientRuntime.ErrorFault { .client } - public static var isRetryable: Swift.Bool { false } - public static var isThrottling: Swift.Bool { false } - public internal(set) var httpResponse = SmithyHTTPAPI.HTTPResponse() - public internal(set) var message: Swift.String? - public internal(set) var requestID: Swift.String? - - public init( - message: Swift.String? = nil - ) - { - self.properties.message = message - } -} - /// The maximum number of DB shard groups for your Amazon Web Services account in the specified Amazon Web Services Region has been reached. public struct MaxDBShardGroupLimitReached: ClientRuntime.ModeledError, AWSClientRuntime.AWSServiceError, ClientRuntime.HTTPError, Swift.Error { @@ -8257,13 +8233,13 @@ public struct UnsupportedDBEngineVersionFault: ClientRuntime.ModeledError, AWSCl } public struct CreateDBShardGroupInput { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: /// - /// * 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. + /// * 0 - Creates a DB shard group without a standby DB shard group. This is the default value. /// - /// * 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. 
+ /// * 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). /// - /// * 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + /// * 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public var computeRedundancy: Swift.Int? /// The name of the primary DB cluster for the DB shard group. /// This member is required. @@ -8309,16 +8285,18 @@ public struct CreateDBShardGroupInput { } public struct CreateDBShardGroupOutput { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: /// - /// * 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. + /// * 0 - Creates a DB shard group without a standby DB shard group. This is the default value. /// - /// * 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. + /// * 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). /// - /// * 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + /// * 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public var computeRedundancy: Swift.Int? /// The name of the primary DB cluster for the DB shard group. public var dbClusterIdentifier: Swift.String? + /// The Amazon Resource Name (ARN) for the DB shard group. + public var dbShardGroupArn: Swift.String? /// The name of the DB shard group. public var dbShardGroupIdentifier: Swift.String? /// The Amazon Web Services Region-unique, immutable identifier for the DB shard group. @@ -8337,6 +8315,7 @@ public struct CreateDBShardGroupOutput { public init( computeRedundancy: Swift.Int? = nil, dbClusterIdentifier: Swift.String? = nil, + dbShardGroupArn: Swift.String? = nil, dbShardGroupIdentifier: Swift.String? = nil, dbShardGroupResourceId: Swift.String? = nil, endpoint: Swift.String? = nil, @@ -8348,6 +8327,7 @@ public struct CreateDBShardGroupOutput { { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier + self.dbShardGroupArn = dbShardGroupArn self.dbShardGroupIdentifier = dbShardGroupIdentifier self.dbShardGroupResourceId = dbShardGroupResourceId self.endpoint = endpoint @@ -10783,16 +10763,18 @@ public struct DeleteDBShardGroupInput { } public struct DeleteDBShardGroupOutput { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: /// - /// * 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. + /// * 0 - Creates a DB shard group without a standby DB shard group. This is the default value. /// - /// * 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. + /// * 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). /// - /// * 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. 
+ /// * 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public var computeRedundancy: Swift.Int? /// The name of the primary DB cluster for the DB shard group. public var dbClusterIdentifier: Swift.String? + /// The Amazon Resource Name (ARN) for the DB shard group. + public var dbShardGroupArn: Swift.String? /// The name of the DB shard group. public var dbShardGroupIdentifier: Swift.String? /// The Amazon Web Services Region-unique, immutable identifier for the DB shard group. @@ -10811,6 +10793,7 @@ public struct DeleteDBShardGroupOutput { public init( computeRedundancy: Swift.Int? = nil, dbClusterIdentifier: Swift.String? = nil, + dbShardGroupArn: Swift.String? = nil, dbShardGroupIdentifier: Swift.String? = nil, dbShardGroupResourceId: Swift.String? = nil, endpoint: Swift.String? = nil, @@ -10822,6 +10805,7 @@ public struct DeleteDBShardGroupOutput { { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier + self.dbShardGroupArn = dbShardGroupArn self.dbShardGroupIdentifier = dbShardGroupIdentifier self.dbShardGroupResourceId = dbShardGroupResourceId self.endpoint = endpoint @@ -13755,16 +13739,18 @@ public struct DescribeDBShardGroupsInput { extension RDSClientTypes { public struct DBShardGroup { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: /// - /// * 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. + /// * 0 - Creates a DB shard group without a standby DB shard group. This is the default value. /// - /// * 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. + /// * 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). /// - /// * 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + /// * 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public var computeRedundancy: Swift.Int? /// The name of the primary DB cluster for the DB shard group. public var dbClusterIdentifier: Swift.String? + /// The Amazon Resource Name (ARN) for the DB shard group. + public var dbShardGroupArn: Swift.String? /// The name of the DB shard group. public var dbShardGroupIdentifier: Swift.String? /// The Amazon Web Services Region-unique, immutable identifier for the DB shard group. @@ -13783,6 +13769,7 @@ extension RDSClientTypes { public init( computeRedundancy: Swift.Int? = nil, dbClusterIdentifier: Swift.String? = nil, + dbShardGroupArn: Swift.String? = nil, dbShardGroupIdentifier: Swift.String? = nil, dbShardGroupResourceId: Swift.String? = nil, endpoint: Swift.String? = nil, @@ -13794,6 +13781,7 @@ extension RDSClientTypes { { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier + self.dbShardGroupArn = dbShardGroupArn self.dbShardGroupIdentifier = dbShardGroupIdentifier self.dbShardGroupResourceId = dbShardGroupResourceId self.endpoint = endpoint @@ -18090,6 +18078,14 @@ public struct ModifyDBRecommendationOutput { } public struct ModifyDBShardGroupInput { + /// Specifies whether to create standby DB shard groups for the DB shard group. 
Valid values are the following: + /// + /// * 0 - Creates a DB shard group without a standby DB shard group. This is the default value. + /// + /// * 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). + /// + /// * 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. + public var computeRedundancy: Swift.Int? /// The name of the DB shard group to modify. /// This member is required. public var dbShardGroupIdentifier: Swift.String? @@ -18099,11 +18095,13 @@ public struct ModifyDBShardGroupInput { public var minACU: Swift.Double? public init( + computeRedundancy: Swift.Int? = nil, dbShardGroupIdentifier: Swift.String? = nil, maxACU: Swift.Double? = nil, minACU: Swift.Double? = nil ) { + self.computeRedundancy = computeRedundancy self.dbShardGroupIdentifier = dbShardGroupIdentifier self.maxACU = maxACU self.minACU = minACU @@ -18111,16 +18109,18 @@ public struct ModifyDBShardGroupInput { } public struct ModifyDBShardGroupOutput { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: /// - /// * 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. + /// * 0 - Creates a DB shard group without a standby DB shard group. This is the default value. /// - /// * 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. + /// * 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). /// - /// * 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + /// * 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public var computeRedundancy: Swift.Int? /// The name of the primary DB cluster for the DB shard group. public var dbClusterIdentifier: Swift.String? + /// The Amazon Resource Name (ARN) for the DB shard group. + public var dbShardGroupArn: Swift.String? /// The name of the DB shard group. public var dbShardGroupIdentifier: Swift.String? /// The Amazon Web Services Region-unique, immutable identifier for the DB shard group. @@ -18139,6 +18139,7 @@ public struct ModifyDBShardGroupOutput { public init( computeRedundancy: Swift.Int? = nil, dbClusterIdentifier: Swift.String? = nil, + dbShardGroupArn: Swift.String? = nil, dbShardGroupIdentifier: Swift.String? = nil, dbShardGroupResourceId: Swift.String? = nil, endpoint: Swift.String? = nil, @@ -18150,6 +18151,7 @@ public struct ModifyDBShardGroupOutput { { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier + self.dbShardGroupArn = dbShardGroupArn self.dbShardGroupIdentifier = dbShardGroupIdentifier self.dbShardGroupResourceId = dbShardGroupResourceId self.endpoint = endpoint @@ -18853,16 +18855,18 @@ public struct RebootDBShardGroupInput { } public struct RebootDBShardGroupOutput { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: /// - /// * 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. 
+ /// * 0 - Creates a DB shard group without a standby DB shard group. This is the default value. /// - /// * 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. + /// * 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). /// - /// * 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + /// * 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public var computeRedundancy: Swift.Int? /// The name of the primary DB cluster for the DB shard group. public var dbClusterIdentifier: Swift.String? + /// The Amazon Resource Name (ARN) for the DB shard group. + public var dbShardGroupArn: Swift.String? /// The name of the DB shard group. public var dbShardGroupIdentifier: Swift.String? /// The Amazon Web Services Region-unique, immutable identifier for the DB shard group. @@ -18881,6 +18885,7 @@ public struct RebootDBShardGroupOutput { public init( computeRedundancy: Swift.Int? = nil, dbClusterIdentifier: Swift.String? = nil, + dbShardGroupArn: Swift.String? = nil, dbShardGroupIdentifier: Swift.String? = nil, dbShardGroupResourceId: Swift.String? = nil, endpoint: Swift.String? = nil, @@ -18892,6 +18897,7 @@ public struct RebootDBShardGroupOutput { { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier + self.dbShardGroupArn = dbShardGroupArn self.dbShardGroupIdentifier = dbShardGroupIdentifier self.dbShardGroupResourceId = dbShardGroupResourceId self.endpoint = endpoint @@ -24618,6 +24624,7 @@ extension ModifyDBShardGroupInput { static func write(value: ModifyDBShardGroupInput?, to writer: SmithyFormURL.Writer) throws { guard let value else { return } + try writer["ComputeRedundancy"].write(value.computeRedundancy) try writer["DBShardGroupIdentifier"].write(value.dbShardGroupIdentifier) try writer["MaxACU"].write(value.maxACU) try writer["MinACU"].write(value.minACU) @@ -25687,6 +25694,7 @@ extension CreateDBShardGroupOutput { var value = CreateDBShardGroupOutput() value.computeRedundancy = try reader["ComputeRedundancy"].readIfPresent() value.dbClusterIdentifier = try reader["DBClusterIdentifier"].readIfPresent() + value.dbShardGroupArn = try reader["DBShardGroupArn"].readIfPresent() value.dbShardGroupIdentifier = try reader["DBShardGroupIdentifier"].readIfPresent() value.dbShardGroupResourceId = try reader["DBShardGroupResourceId"].readIfPresent() value.endpoint = try reader["Endpoint"].readIfPresent() @@ -25985,6 +25993,7 @@ extension DeleteDBShardGroupOutput { var value = DeleteDBShardGroupOutput() value.computeRedundancy = try reader["ComputeRedundancy"].readIfPresent() value.dbClusterIdentifier = try reader["DBClusterIdentifier"].readIfPresent() + value.dbShardGroupArn = try reader["DBShardGroupArn"].readIfPresent() value.dbShardGroupIdentifier = try reader["DBShardGroupIdentifier"].readIfPresent() value.dbShardGroupResourceId = try reader["DBShardGroupResourceId"].readIfPresent() value.endpoint = try reader["Endpoint"].readIfPresent() @@ -26971,6 +26980,7 @@ extension ModifyDBShardGroupOutput { var value = ModifyDBShardGroupOutput() value.computeRedundancy = try reader["ComputeRedundancy"].readIfPresent() value.dbClusterIdentifier = try reader["DBClusterIdentifier"].readIfPresent() + value.dbShardGroupArn = try reader["DBShardGroupArn"].readIfPresent() value.dbShardGroupIdentifier = try reader["DBShardGroupIdentifier"].readIfPresent() value.dbShardGroupResourceId 
= try reader["DBShardGroupResourceId"].readIfPresent() value.endpoint = try reader["Endpoint"].readIfPresent() @@ -27158,6 +27168,7 @@ extension RebootDBShardGroupOutput { var value = RebootDBShardGroupOutput() value.computeRedundancy = try reader["ComputeRedundancy"].readIfPresent() value.dbClusterIdentifier = try reader["DBClusterIdentifier"].readIfPresent() + value.dbShardGroupArn = try reader["DBShardGroupArn"].readIfPresent() value.dbShardGroupIdentifier = try reader["DBShardGroupIdentifier"].readIfPresent() value.dbShardGroupResourceId = try reader["DBShardGroupResourceId"].readIfPresent() value.endpoint = try reader["Endpoint"].readIfPresent() @@ -27996,7 +28007,6 @@ enum CreateDBShardGroupOutputError { case "DBClusterNotFoundFault": return try DBClusterNotFoundFault.makeError(baseError: baseError) case "DBShardGroupAlreadyExists": return try DBShardGroupAlreadyExistsFault.makeError(baseError: baseError) case "InvalidDBClusterStateFault": return try InvalidDBClusterStateFault.makeError(baseError: baseError) - case "InvalidMaxAcu": return try InvalidMaxAcuFault.makeError(baseError: baseError) case "InvalidVPCNetworkStateFault": return try InvalidVPCNetworkStateFault.makeError(baseError: baseError) case "MaxDBShardGroupLimitReached": return try MaxDBShardGroupLimitReached.makeError(baseError: baseError) case "UnsupportedDBEngineVersion": return try UnsupportedDBEngineVersionFault.makeError(baseError: baseError) @@ -29462,7 +29472,6 @@ enum ModifyDBShardGroupOutputError { case "DBShardGroupAlreadyExists": return try DBShardGroupAlreadyExistsFault.makeError(baseError: baseError) case "DBShardGroupNotFound": return try DBShardGroupNotFoundFault.makeError(baseError: baseError) case "InvalidDBClusterStateFault": return try InvalidDBClusterStateFault.makeError(baseError: baseError) - case "InvalidMaxAcu": return try InvalidMaxAcuFault.makeError(baseError: baseError) default: return try AWSClientRuntime.UnknownAWSHTTPServiceError.makeError(baseError: baseError) } } @@ -31339,19 +31348,6 @@ extension DBShardGroupAlreadyExistsFault { } } -extension InvalidMaxAcuFault { - - static func makeError(baseError: AWSClientRuntime.AWSQueryError) throws -> InvalidMaxAcuFault { - let reader = baseError.errorBodyReader - var value = InvalidMaxAcuFault() - value.properties.message = try reader["message"].readIfPresent() - value.httpResponse = baseError.httpResponse - value.requestID = baseError.requestID - value.message = baseError.message - return value - } -} - extension DBSubnetGroupQuotaExceededFault { static func makeError(baseError: AWSClientRuntime.AWSQueryError) throws -> DBSubnetGroupQuotaExceededFault { @@ -33731,6 +33727,7 @@ extension RDSClientTypes.DBShardGroup { value.status = try reader["Status"].readIfPresent() value.publiclyAccessible = try reader["PubliclyAccessible"].readIfPresent() value.endpoint = try reader["Endpoint"].readIfPresent() + value.dbShardGroupArn = try reader["DBShardGroupArn"].readIfPresent() return value } } diff --git a/Sources/Services/AWSRDS/Sources/AWSRDS/RDSClient.swift b/Sources/Services/AWSRDS/Sources/AWSRDS/RDSClient.swift index d2f70488f7a..90371aa0bda 100644 --- a/Sources/Services/AWSRDS/Sources/AWSRDS/RDSClient.swift +++ b/Sources/Services/AWSRDS/Sources/AWSRDS/RDSClient.swift @@ -2087,7 +2087,6 @@ extension RDSClient { /// - `DBClusterNotFoundFault` : DBClusterIdentifier doesn't refer to an existing DB cluster. 
/// - `DBShardGroupAlreadyExistsFault` : The specified DB shard group name must be unique in your Amazon Web Services account in the specified Amazon Web Services Region. /// - `InvalidDBClusterStateFault` : The requested operation can't be performed while the cluster is in this state. - /// - `InvalidMaxAcuFault` : The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs). /// - `InvalidVPCNetworkStateFault` : The DB subnet group doesn't cover all Availability Zones after it's created because of users' change. /// - `MaxDBShardGroupLimitReached` : The maximum number of DB shard groups for your Amazon Web Services account in the specified Amazon Web Services Region has been reached. /// - `UnsupportedDBEngineVersionFault` : The specified DB engine version isn't supported for Aurora Limitless Database. @@ -8791,7 +8790,6 @@ extension RDSClient { /// - `DBShardGroupAlreadyExistsFault` : The specified DB shard group name must be unique in your Amazon Web Services account in the specified Amazon Web Services Region. /// - `DBShardGroupNotFoundFault` : The specified DB shard group name wasn't found. /// - `InvalidDBClusterStateFault` : The requested operation can't be performed while the cluster is in this state. - /// - `InvalidMaxAcuFault` : The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs). public func modifyDBShardGroup(input: ModifyDBShardGroupInput) async throws -> ModifyDBShardGroupOutput { let context = Smithy.ContextBuilder() .withMethod(value: .post) @@ -10688,7 +10686,7 @@ extension RDSClient { /// Performs the `RestoreDBInstanceFromDBSnapshot` operation on the `AmazonRDSv19` service. /// - /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see [Upgrading a MySQL DB snapshot engine version](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/mysql-upgrade-snapshot.html). For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, [Upgrading a PostgreSQL DB snapshot engine version](https://docs.aws.amazon.com/USER_UpgradeDBSnapshot.PostgreSQL.html). This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. 
+ /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see [Upgrading a MySQL DB snapshot engine version](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/mysql-upgrade-snapshot.html). For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, [Upgrading a PostgreSQL DB snapshot engine version](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBSnapshot.PostgreSQL.html). This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. /// /// - Parameter RestoreDBInstanceFromDBSnapshotInput : /// diff --git a/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Models.swift b/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Models.swift index 1362e7590c0..9ad58894d61 100644 --- a/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Models.swift +++ b/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Models.swift @@ -1067,6 +1067,119 @@ public struct ListIndexesForMembersOutput { } } +public struct ListResourcesInput { + /// A search filter defines which resources can be part of a search query result set. + public var filters: ResourceExplorer2ClientTypes.SearchFilter? + /// The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. + public var maxResults: Swift.Int? + /// The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. 
Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from. The pagination tokens expire after 24 hours. + public var nextToken: Swift.String? + /// Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a 401 Unauthorized exception. + public var viewArn: Swift.String? + + public init( + filters: ResourceExplorer2ClientTypes.SearchFilter? = nil, + maxResults: Swift.Int? = nil, + nextToken: Swift.String? = nil, + viewArn: Swift.String? = nil + ) + { + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.viewArn = viewArn + } +} + +extension ListResourcesInput: Swift.CustomDebugStringConvertible { + public var debugDescription: Swift.String { + "ListResourcesInput(maxResults: \(Swift.String(describing: maxResults)), nextToken: \(Swift.String(describing: nextToken)), viewArn: \(Swift.String(describing: viewArn)), filters: \"CONTENT_REDACTED\")"} +} + +extension ResourceExplorer2ClientTypes { + /// A structure that describes a property of a resource. + public struct ResourceProperty { + /// Details about this property. The content of this field is a JSON object that varies based on the resource type. + public var data: Smithy.Document? + /// The date and time that the information about this resource property was last updated. + public var lastReportedAt: Foundation.Date? + /// The name of this property of the resource. + public var name: Swift.String? + + public init( + data: Smithy.Document? = nil, + lastReportedAt: Foundation.Date? = nil, + name: Swift.String? = nil + ) + { + self.data = data + self.lastReportedAt = lastReportedAt + self.name = name + } + } + +} + +extension ResourceExplorer2ClientTypes { + /// A resource in Amazon Web Services that Amazon Web Services Resource Explorer has discovered, and for which it has stored information in the index of the Amazon Web Services Region that contains the resource. + public struct Resource { + /// The [Amazon resource name (ARN)](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the resource. + public var arn: Swift.String? + /// The date and time that Resource Explorer last queried this resource and updated the index with the latest information about the resource. + public var lastReportedAt: Foundation.Date? + /// The Amazon Web Services account that owns the resource. + public var owningAccountId: Swift.String? + /// A structure with additional type-specific details about the resource. These properties can be added by turning on integration between Resource Explorer and other Amazon Web Services services. + public var properties: [ResourceExplorer2ClientTypes.ResourceProperty]? + /// The Amazon Web Services Region in which the resource was created and exists. + public var region: Swift.String? + /// The type of the resource. + public var resourceType: Swift.String? + /// The Amazon Web Services service that owns the resource and is responsible for creating and updating it. + public var service: Swift.String? + + public init( + arn: Swift.String? = nil, + lastReportedAt: Foundation.Date? = nil, + owningAccountId: Swift.String? 
= nil, + properties: [ResourceExplorer2ClientTypes.ResourceProperty]? = nil, + region: Swift.String? = nil, + resourceType: Swift.String? = nil, + service: Swift.String? = nil + ) + { + self.arn = arn + self.lastReportedAt = lastReportedAt + self.owningAccountId = owningAccountId + self.properties = properties + self.region = region + self.resourceType = resourceType + self.service = service + } + } + +} + +public struct ListResourcesOutput { + /// If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. The pagination tokens expire after 24 hours. + public var nextToken: Swift.String? + /// The list of structures that describe the resources that match the query. + public var resources: [ResourceExplorer2ClientTypes.Resource]? + /// The Amazon resource name (ARN) of the view that this operation used to perform the search. + public var viewArn: Swift.String? + + public init( + nextToken: Swift.String? = nil, + resources: [ResourceExplorer2ClientTypes.Resource]? = nil, + viewArn: Swift.String? = nil + ) + { + self.nextToken = nextToken + self.resources = resources + self.viewArn = viewArn + } +} + public struct ListSupportedResourceTypesInput { /// The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. public var maxResults: Swift.Int? @@ -1088,7 +1201,7 @@ extension ResourceExplorer2ClientTypes { public struct SupportedResourceType { /// The unique identifier of the resource type. public var resourceType: Swift.String? - /// The Amazon Web Service that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type. + /// The Amazon Web Services service that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type. public var service: Swift.String? public init( @@ -1149,70 +1262,6 @@ extension ListTagsForResourceOutput: Swift.CustomDebugStringConvertible { "ListTagsForResourceOutput(tags: \"CONTENT_REDACTED\")"} } -extension ResourceExplorer2ClientTypes { - /// A structure that describes a property of a resource. - public struct ResourceProperty { - /// Details about this property. The content of this field is a JSON object that varies based on the resource type. - public var data: Smithy.Document? - /// The date and time that the information about this resource property was last updated. - public var lastReportedAt: Foundation.Date? - /// The name of this property of the resource. - public var name: Swift.String? 
= nil - ) - { - self.data = data - self.lastReportedAt = lastReportedAt - self.name = name - } - } - -} - -extension ResourceExplorer2ClientTypes { - /// A resource in Amazon Web Services that Amazon Web Services Resource Explorer has discovered, and for which it has stored information in the index of the Amazon Web Services Region that contains the resource. - public struct Resource { - /// The [Amazon resource name (ARN)](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the resource. - public var arn: Swift.String? - /// The date and time that Resource Explorer last queried this resource and updated the index with the latest information about the resource. - public var lastReportedAt: Foundation.Date? - /// The Amazon Web Services account that owns the resource. - public var owningAccountId: Swift.String? - /// A structure with additional type-specific details about the resource. These properties can be added by turning on integration between Resource Explorer and other Amazon Web Services services. - public var properties: [ResourceExplorer2ClientTypes.ResourceProperty]? - /// The Amazon Web Services Region in which the resource was created and exists. - public var region: Swift.String? - /// The type of the resource. - public var resourceType: Swift.String? - /// The Amazon Web Service that owns the resource and is responsible for creating and updating it. - public var service: Swift.String? - - public init( - arn: Swift.String? = nil, - lastReportedAt: Foundation.Date? = nil, - owningAccountId: Swift.String? = nil, - properties: [ResourceExplorer2ClientTypes.ResourceProperty]? = nil, - region: Swift.String? = nil, - resourceType: Swift.String? = nil, - service: Swift.String? = nil - ) - { - self.arn = arn - self.lastReportedAt = lastReportedAt - self.owningAccountId = owningAccountId - self.properties = properties - self.region = region - self.resourceType = resourceType - self.service = service - } - } - -} - extension ResourceExplorer2ClientTypes { /// Information about the number of results that match the query. At this time, Amazon Web Services Resource Explorer doesn't count more than 1,000 matches for any query. This structure provides information about whether the query exceeded this limit. This field is included in every page when you paginate the results. public struct ResourceCount { @@ -1437,6 +1486,13 @@ extension ListIndexesForMembersInput { } } +extension ListResourcesInput { + + static func urlPathProvider(_ value: ListResourcesInput) -> Swift.String? { + return "/ListResources" + } +} + extension ListSupportedResourceTypesInput { static func urlPathProvider(_ value: ListSupportedResourceTypesInput) -> Swift.String? 
{ @@ -1601,6 +1657,17 @@ extension ListIndexesForMembersInput { } } +extension ListResourcesInput { + + static func write(value: ListResourcesInput?, to writer: SmithyJSON.Writer) throws { + guard let value else { return } + try writer["Filters"].write(value.filters, with: ResourceExplorer2ClientTypes.SearchFilter.write(value:to:)) + try writer["MaxResults"].write(value.maxResults) + try writer["NextToken"].write(value.nextToken) + try writer["ViewArn"].write(value.viewArn) + } +} + extension ListSupportedResourceTypesInput { static func write(value: ListSupportedResourceTypesInput?, to writer: SmithyJSON.Writer) throws { @@ -1823,6 +1890,20 @@ extension ListIndexesForMembersOutput { } } +extension ListResourcesOutput { + + static func httpOutput(from httpResponse: SmithyHTTPAPI.HTTPResponse) async throws -> ListResourcesOutput { + let data = try await httpResponse.data() + let responseReader = try SmithyJSON.Reader.from(data: data) + let reader = responseReader + var value = ListResourcesOutput() + value.nextToken = try reader["NextToken"].readIfPresent() + value.resources = try reader["Resources"].readListIfPresent(memberReadingClosure: ResourceExplorer2ClientTypes.Resource.read(from:), memberNodeInfo: "member", isFlattened: false) + value.viewArn = try reader["ViewArn"].readIfPresent() + return value + } +} + extension ListSupportedResourceTypesOutput { static func httpOutput(from httpResponse: SmithyHTTPAPI.HTTPResponse) async throws -> ListSupportedResourceTypesOutput { @@ -2152,6 +2233,25 @@ enum ListIndexesForMembersOutputError { } } +enum ListResourcesOutputError { + + static func httpError(from httpResponse: SmithyHTTPAPI.HTTPResponse) async throws -> Swift.Error { + let data = try await httpResponse.data() + let responseReader = try SmithyJSON.Reader.from(data: data) + let baseError = try AWSClientRuntime.RestJSONError(httpResponse: httpResponse, responseReader: responseReader, noErrorWrapping: false) + if let error = baseError.customError() { return error } + switch baseError.code { + case "AccessDeniedException": return try AccessDeniedException.makeError(baseError: baseError) + case "InternalServerException": return try InternalServerException.makeError(baseError: baseError) + case "ResourceNotFoundException": return try ResourceNotFoundException.makeError(baseError: baseError) + case "ThrottlingException": return try ThrottlingException.makeError(baseError: baseError) + case "UnauthorizedException": return try UnauthorizedException.makeError(baseError: baseError) + case "ValidationException": return try ValidationException.makeError(baseError: baseError) + default: return try AWSClientRuntime.UnknownAWSHTTPServiceError.makeError(baseError: baseError) + } + } +} + enum ListSupportedResourceTypesOutputError { static func httpError(from httpResponse: SmithyHTTPAPI.HTTPResponse) async throws -> Swift.Error { @@ -2500,17 +2600,6 @@ extension ResourceExplorer2ClientTypes.MemberIndex { } } -extension ResourceExplorer2ClientTypes.SupportedResourceType { - - static func read(from reader: SmithyJSON.Reader) throws -> ResourceExplorer2ClientTypes.SupportedResourceType { - guard reader.hasContent else { throw SmithyReadWrite.ReaderError.requiredValueNotPresent } - var value = ResourceExplorer2ClientTypes.SupportedResourceType() - value.service = try reader["Service"].readIfPresent() - value.resourceType = try reader["ResourceType"].readIfPresent() - return value - } -} - extension ResourceExplorer2ClientTypes.Resource { static func read(from reader: SmithyJSON.Reader) throws -> 
ResourceExplorer2ClientTypes.Resource { @@ -2539,6 +2628,17 @@ extension ResourceExplorer2ClientTypes.ResourceProperty { } } +extension ResourceExplorer2ClientTypes.SupportedResourceType { + + static func read(from reader: SmithyJSON.Reader) throws -> ResourceExplorer2ClientTypes.SupportedResourceType { + guard reader.hasContent else { throw SmithyReadWrite.ReaderError.requiredValueNotPresent } + var value = ResourceExplorer2ClientTypes.SupportedResourceType() + value.service = try reader["Service"].readIfPresent() + value.resourceType = try reader["ResourceType"].readIfPresent() + return value + } +} + extension ResourceExplorer2ClientTypes.ResourceCount { static func read(from reader: SmithyJSON.Reader) throws -> ResourceExplorer2ClientTypes.ResourceCount { diff --git a/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Paginators.swift b/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Paginators.swift index e6f19ae8b6f..3fd038825aa 100644 --- a/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Paginators.swift +++ b/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/Paginators.swift @@ -41,6 +41,38 @@ extension PaginatorSequence where OperationStackInput == ListIndexesForMembersIn return try await self.asyncCompactMap { item in item.indexes } } } +extension ResourceExplorer2Client { + /// Paginate over `[ListResourcesOutput]` results. + /// + /// When this operation is called, an `AsyncSequence` is created. AsyncSequences are lazy so no service + /// calls are made until the sequence is iterated over. This also means there is no guarantee that the request is valid + /// until then. If there are errors in your request, you will see the failures only after you start iterating. + /// - Parameters: + /// - input: A `[ListResourcesInput]` to start pagination + /// - Returns: An `AsyncSequence` that can iterate over `ListResourcesOutput` + public func listResourcesPaginated(input: ListResourcesInput) -> ClientRuntime.PaginatorSequence { + return ClientRuntime.PaginatorSequence(input: input, inputKey: \.nextToken, outputKey: \.nextToken, paginationFunction: self.listResources(input:)) + } +} + +extension ListResourcesInput: ClientRuntime.PaginateToken { + public func usingPaginationToken(_ token: Swift.String) -> ListResourcesInput { + return ListResourcesInput( + filters: self.filters, + maxResults: self.maxResults, + nextToken: token, + viewArn: self.viewArn + )} +} + +extension PaginatorSequence where OperationStackInput == ListResourcesInput, OperationStackOutput == ListResourcesOutput { + /// This paginator transforms the `AsyncSequence` returned by `listResourcesPaginated` + /// to access the nested member `[ResourceExplorer2ClientTypes.Resource]` + /// - Returns: `[ResourceExplorer2ClientTypes.Resource]` + public func resources() async throws -> [ResourceExplorer2ClientTypes.Resource] { + return try await self.asyncCompactMap { item in item.resources } + } +} extension ResourceExplorer2Client { /// Paginate over `[ListSupportedResourceTypesOutput]` results. 
/// diff --git a/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/ResourceExplorer2Client.swift b/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/ResourceExplorer2Client.swift index be322323a50..058eaf2e2e7 100644 --- a/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/ResourceExplorer2Client.swift +++ b/Sources/Services/AWSResourceExplorer2/Sources/AWSResourceExplorer2/ResourceExplorer2Client.swift @@ -714,7 +714,7 @@ extension ResourceExplorer2Client { /// Performs the `GetAccountLevelServiceConfiguration` operation on the `ResourceExplorer` service. /// - /// Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account or a delegated administrator with service access enabled can invoke this API call. + /// Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account can invoke this API call. /// /// - Parameter GetAccountLevelServiceConfigurationInput : [no documentation found] /// @@ -1139,6 +1139,80 @@ extension ResourceExplorer2Client { return try await op.execute(input: input) } + /// Performs the `ListResources` operation on the `ResourceExplorer` service. + /// + /// Returns a list of resources and their details that match the specified criteria. This query must use a view. If you don’t explicitly specify a view, then Resource Explorer uses the default view for the Amazon Web Services Region in which you call this operation. + /// + /// - Parameter ListResourcesInput : [no documentation found] + /// + /// - Returns: `ListResourcesOutput` : [no documentation found] + /// + /// - Throws: One of the exceptions listed below __Possible Exceptions__. + /// + /// __Possible Exceptions:__ + /// - `AccessDeniedException` : The credentials that you used to call this operation don't have the minimum required permissions. + /// - `InternalServerException` : The request failed because of internal service error. Try your request again later. + /// - `ResourceNotFoundException` : You specified a resource that doesn't exist. Check the ID or ARN that you used to identify the resource, and try again. + /// - `ThrottlingException` : The request failed because you exceeded a rate limit for this operation. For more information, see [Quotas for Resource Explorer](https://docs.aws.amazon.com/resource-explorer/latest/userguide/quotas.html). + /// - `UnauthorizedException` : The principal making the request isn't permitted to perform the operation. + /// - `ValidationException` : You provided an invalid value for one of the operation's parameters. Check the syntax for the operation, and try again. + public func listResources(input: ListResourcesInput) async throws -> ListResourcesOutput { + let context = Smithy.ContextBuilder() + .withMethod(value: .post) + .withServiceName(value: serviceName) + .withOperation(value: "listResources") + .withIdempotencyTokenGenerator(value: config.idempotencyTokenGenerator) + .withLogger(value: config.logger) + .withPartitionID(value: config.partitionID) + .withAuthSchemes(value: config.authSchemes ?? 
[]) + .withAuthSchemeResolver(value: config.authSchemeResolver) + .withUnsignedPayloadTrait(value: false) + .withSocketTimeout(value: config.httpClientConfiguration.socketTimeout) + .withIdentityResolver(value: config.bearerTokenIdentityResolver, schemeID: "smithy.api#httpBearerAuth") + .withIdentityResolver(value: config.awsCredentialIdentityResolver, schemeID: "aws.auth#sigv4") + .withIdentityResolver(value: config.awsCredentialIdentityResolver, schemeID: "aws.auth#sigv4a") + .withRegion(value: config.region) + .withSigningName(value: "resource-explorer-2") + .withSigningRegion(value: config.signingRegion) + .build() + let builder = ClientRuntime.OrchestratorBuilder() + config.interceptorProviders.forEach { provider in + builder.interceptors.add(provider.create()) + } + config.httpInterceptorProviders.forEach { provider in + builder.interceptors.add(provider.create()) + } + builder.interceptors.add(ClientRuntime.URLPathMiddleware(ListResourcesInput.urlPathProvider(_:))) + builder.interceptors.add(ClientRuntime.URLHostMiddleware()) + builder.interceptors.add(ClientRuntime.ContentTypeMiddleware(contentType: "application/json")) + builder.serialize(ClientRuntime.BodyMiddleware(rootNodeInfo: "", inputWritingClosure: ListResourcesInput.write(value:to:))) + builder.interceptors.add(ClientRuntime.ContentLengthMiddleware()) + builder.deserialize(ClientRuntime.DeserializeMiddleware(ListResourcesOutput.httpOutput(from:), ListResourcesOutputError.httpError(from:))) + builder.interceptors.add(ClientRuntime.LoggerMiddleware(clientLogMode: config.clientLogMode)) + builder.retryStrategy(SmithyRetries.DefaultRetryStrategy(options: config.retryStrategyOptions)) + builder.retryErrorInfoProvider(AWSClientRuntime.AWSRetryErrorInfoProvider.errorInfo(for:)) + builder.applySigner(ClientRuntime.SignerMiddleware()) + let endpointParams = EndpointParams(endpoint: config.endpoint, region: config.region, useDualStack: config.useDualStack ?? false, useFIPS: config.useFIPS ?? false) + builder.applyEndpoint(AWSClientRuntime.EndpointResolverMiddleware(endpointResolverBlock: { [config] in try config.endpointResolver.resolve(params: $0) }, endpointParams: endpointParams)) + builder.interceptors.add(AWSClientRuntime.UserAgentMiddleware(serviceID: serviceName, version: "1.0", config: config)) + builder.selectAuthScheme(ClientRuntime.AuthSchemeMiddleware()) + builder.interceptors.add(AWSClientRuntime.AmzSdkInvocationIdMiddleware()) + builder.interceptors.add(AWSClientRuntime.AmzSdkRequestMiddleware(maxRetries: config.retryStrategyOptions.maxRetriesBase)) + var metricsAttributes = Smithy.Attributes() + metricsAttributes.set(key: ClientRuntime.OrchestratorMetricsAttributesKeys.service, value: "ResourceExplorer2") + metricsAttributes.set(key: ClientRuntime.OrchestratorMetricsAttributesKeys.method, value: "ListResources") + let op = builder.attributes(context) + .telemetry(ClientRuntime.OrchestratorTelemetry( + telemetryProvider: config.telemetryProvider, + metricsAttributes: metricsAttributes, + meterScope: serviceName, + tracerScope: serviceName + )) + .executeRequest(client) + .build() + return try await op.execute(input: input) + } + /// Performs the `ListSupportedResourceTypes` operation on the `ResourceExplorer` service. /// /// Retrieves a list of all resource types currently supported by Amazon Web Services Resource Explorer.
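As an aside for readers of this patch, here is a minimal usage sketch of the new ListResources operation and paginator added above. The snippet is not part of the generated code: the helper name is invented, the client is assumed to be already configured, and leaving viewArn unset relies on the Region's default view as described in the ListResourcesInput documentation.

import AWSResourceExplorer2

// Hypothetical helper (not part of the SDK): lists every resource visible through the
// caller's default Resource Explorer view and prints each resource's type and ARN.
func printDiscoveredResources(client: ResourceExplorer2Client) async throws {
    // Leaving `viewArn` nil falls back to the default view for the Region;
    // `maxResults` only caps the page size, not the total number of results.
    let input = ListResourcesInput(maxResults: 100)

    // `listResourcesPaginated` wraps the NextToken handling generated in Models.swift;
    // pages are fetched lazily as the AsyncSequence is iterated.
    for try await page in client.listResourcesPaginated(input: input) {
        for resource in page.resources ?? [] {
            print(resource.resourceType ?? "unknown type", resource.arn ?? "unknown ARN")
        }
    }
}

A single call to client.listResources(input:) returns one page plus a NextToken, which matches the manual pagination contract documented on ListResourcesInput; the paginator above simply automates that loop.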