chore: update generated files #626

Open · github-actions[bot] wants to merge 1 commit into main from chore/make-generate

Conversation

github-actions[bot] commented on Aug 21, 2024

This is an automatically created PR. Changes were created by running make generate.

--- old_service_types_data.json	2024-10-23 02:48:53.333052100 +0000
+++ new_service_types_data.json	2024-10-23 02:48:54.605049091 +0000
@@ -211,13 +211,25 @@
         "title": "IP filter",
         "description": "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'"
       },
+      "log_output": {
+        "example": "INSIGHTS",
+        "type": "string",
+        "title": "log_output",
+        "description": "The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.",
+        "enum": [
+          "INSIGHTS",
+          "NONE",
+          "TABLE",
+          "INSIGHTS,TABLE"
+        ]
+      },
       "long_query_time": {
         "example": 10,
         "maximum": 3600,
         "minimum": 0,
         "type": "number",
         "title": "long_query_time",
-        "description": "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s"
+        "description": "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute."
       },
       "max_allowed_packet": {
         "example": 67108864,
@@ -335,7 +347,7 @@
         "example": true,
         "type": "boolean",
         "title": "slow_query_log",
-        "description": "Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off"
+        "description": "Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table."
       },
       "sort_buffer_size": {
         "example": 262144,
@@ -541,6 +553,7 @@
       "azure_migration": {
         "type": "object",
         "title": "",
+        "description": "Azure migration settings",
         "properties": {
           "account": {
             "pattern": "^[^\\r\\n]*$",
@@ -577,12 +590,23 @@
             "title": "Endpoint suffix",
             "description": "Defines the DNS suffix for Azure Storage endpoints."
           },
+          "indices": {
+            "pattern": "^(\\*?[a-z0-9._-]*\\*?|-\\*?[a-z0-9._-]*\\*?)(,(\\*?[a-z0-9._-]*\\*?|-\\*?[a-z0-9._-]*\\*?))*[,]?$",
+            "type": "string",
+            "title": "Indices to restore",
+            "description": "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify."
+          },
           "key": {
             "pattern": "^[^\\r\\n]*$",
             "type": "string",
             "title": "Account secret key",
             "description": "Azure account secret key. One of key or sas_token should be specified"
           },
+          "restore_global_state": {
+            "type": "boolean",
+            "title": "Restore the cluster state or not",
+            "description": "If true, restore the cluster state. Defaults to false"
+          },
           "sas_token": {
             "pattern": "^[^\\r\\n]*$",
             "type": "string",
@@ -610,7 +634,7 @@
         "minimum": 2,
         "type": "integer",
         "title": "Concurrent incoming/outgoing shard recoveries per node",
-        "description": "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2."
+        "description": "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to node cpu count * 2."
       },
       "custom_domain": {
         "example": "grafana.example.org",
@@ -654,6 +678,7 @@
       "gcs_migration": {
         "type": "object",
         "title": "",
+        "description": "Google Cloud Storage migration settings",
         "properties": {
           "base_path": {
             "pattern": "^[^\\r\\n]*$",
@@ -684,6 +709,17 @@
             "title": "Credentials",
             "description": "Google Cloud Storage credentials file content"
           },
+          "indices": {
+            "pattern": "^(\\*?[a-z0-9._-]*\\*?|-\\*?[a-z0-9._-]*\\*?)(,(\\*?[a-z0-9._-]*\\*?|-\\*?[a-z0-9._-]*\\*?))*[,]?$",
+            "type": "string",
+            "title": "Indices to restore",
+            "description": "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify."
+          },
+          "restore_global_state": {
+            "type": "boolean",
+            "title": "Restore the cluster state or not",
+            "description": "If true, restore the cluster state. Defaults to false"
+          },
           "snapshot_name": {
             "pattern": "^[^\\r\\n]*$",
             "type": "string",
@@ -1077,6 +1113,7 @@
       "s3_migration": {
         "type": "object",
         "title": "",
+        "description": "AWS S3 / AWS S3 compatible migration settings",
         "properties": {
           "access_key": {
             "pattern": "^[^\\r\\n]*$",
@@ -1113,12 +1150,23 @@
             "title": "The S3 service endpoint to connect",
             "description": "The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint"
           },
+          "indices": {
+            "pattern": "^(\\*?[a-z0-9._-]*\\*?|-\\*?[a-z0-9._-]*\\*?)(,(\\*?[a-z0-9._-]*\\*?|-\\*?[a-z0-9._-]*\\*?))*[,]?$",
+            "type": "string",
+            "title": "Indices to restore",
+            "description": "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify."
+          },
           "region": {
             "pattern": "^[^\\r\\n]*$",
             "type": "string",
             "title": "S3 region",
             "description": "S3 region"
           },
+          "restore_global_state": {
+            "type": "boolean",
+            "title": "Restore the cluster state or not",
+            "description": "If true, restore the cluster state. Defaults to false"
+          },
           "secret_key": {
             "pattern": "^[^\\r\\n]*$",
             "type": "string",
@@ -1212,6 +1260,174 @@
         "title": "Script max compilation rate - circuit breaker to prevent/minimize OOMs",
         "description": "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context"
       },
+      "search_backpressure": {
+        "type": "object",
+        "title": "Search Backpressure Settings",
+        "properties": {
+          "mode": {
+            "type": "string",
+            "title": "The search backpressure mode",
+            "description": "The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only",
+            "enum": [
+              "monitor_only",
+              "enforced",
+              "disabled"
+            ]
+          },
+          "node_duress": {
+            "type": "object",
+            "title": "Node duress settings",
+            "properties": {
+              "cpu_threshold": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The CPU usage threshold (as a percentage) required for a node to be considered to be under duress",
+                "description": "The CPU usage threshold (as a percentage) required for a node to be considered to be under duress. Default is 0.9"
+              },
+              "heap_threshold": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The heap usage threshold (as a percentage) required for a node to be considered to be under duress",
+                "description": "The heap usage threshold (as a percentage) required for a node to be considered to be under duress. Default is 0.7"
+              },
+              "num_successive_breaches": {
+                "minimum": 1,
+                "type": "integer",
+                "title": "The number of successive limit breaches after which the node is considered to be under duress",
+                "description": "The number of successive limit breaches after which the node is considered to be under duress. Default is 3"
+              }
+            }
+          },
+          "search_shard_task": {
+            "type": "object",
+            "title": "Search shard settings",
+            "properties": {
+              "cancellation_burst": {
+                "minimum": 1,
+                "type": "number",
+                "title": "The maximum number of search tasks to cancel in a single iteration of the observer thread",
+                "description": "The maximum number of search tasks to cancel in a single iteration of the observer thread. Default is 10.0"
+              },
+              "cancellation_rate": {
+                "minimum": 0,
+                "type": "number",
+                "title": "The maximum number of tasks to cancel per millisecond of elapsed time.",
+                "description": "The maximum number of tasks to cancel per millisecond of elapsed time. Default is 0.003"
+              },
+              "cancellation_ratio": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The maximum number of tasks to cancel",
+                "description": "The maximum number of tasks to cancel, as a percentage of successful task completions. Default is 0.1"
+              },
+              "cpu_time_millis_threshold": {
+                "minimum": 0,
+                "type": "integer",
+                "title": "The CPU usage threshold (in milliseconds) required for a single search shard task before it is considered for cancellation",
+                "description": "The CPU usage threshold (in milliseconds) required for a single search shard task before it is considered for cancellation. Default is 15000"
+              },
+              "elapsed_time_millis_threshold": {
+                "minimum": 0,
+                "type": "integer",
+                "title": "The elapsed time threshold (in milliseconds) required for a single search shard task before it is considered for cancellation",
+                "description": "The elapsed time threshold (in milliseconds) required for a single search shard task before it is considered for cancellation. Default is 30000"
+              },
+              "heap_moving_average_window_size": {
+                "minimum": 0,
+                "type": "integer",
+                "title": "The number of previously completed search shard tasks to consider when calculating the rolling average of heap usage",
+                "description": "The number of previously completed search shard tasks to consider when calculating the rolling average of heap usage. Default is 100"
+              },
+              "heap_percent_threshold": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The heap usage threshold (as a percentage) required for a single search shard task before it is considered for cancellation",
+                "description": "The heap usage threshold (as a percentage) required for a single search shard task before it is considered for cancellation. Default is 0.5"
+              },
+              "heap_variance": {
+                "minimum": 0,
+                "type": "number",
+                "title": "The minimum variance required for a single search shard task’s heap usage compared to the rolling average of previously completed tasks before it is considered for cancellation",
+                "description": "The minimum variance required for a single search shard task’s heap usage compared to the rolling average of previously completed tasks before it is considered for cancellation. Default is 2.0"
+              },
+              "total_heap_percent_threshold": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The heap usage threshold (as a percentage) required for the sum of heap usages of all search shard tasks before cancellation is applied",
+                "description": "The heap usage threshold (as a percentage) required for the sum of heap usages of all search shard tasks before cancellation is applied. Default is 0.5"
+              }
+            }
+          },
+          "search_task": {
+            "type": "object",
+            "title": "Search task settings",
+            "properties": {
+              "cancellation_burst": {
+                "minimum": 1,
+                "type": "number",
+                "title": "The maximum number of search tasks to cancel in a single iteration of the observer thread",
+                "description": "The maximum number of search tasks to cancel in a single iteration of the observer thread. Default is 5.0"
+              },
+              "cancellation_rate": {
+                "minimum": 0,
+                "type": "number",
+                "title": "The maximum number of search tasks to cancel per millisecond of elapsed time",
+                "description": "The maximum number of search tasks to cancel per millisecond of elapsed time. Default is 0.003"
+              },
+              "cancellation_ratio": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The maximum number of search tasks to cancel, as a percentage of successful search task completions",
+                "description": "The maximum number of search tasks to cancel, as a percentage of successful search task completions. Default is 0.1"
+              },
+              "cpu_time_millis_threshold": {
+                "minimum": 0,
+                "type": "integer",
+                "title": "The CPU usage threshold (in milliseconds) required for an individual parent task before it is considered for cancellation",
+                "description": "The CPU usage threshold (in milliseconds) required for an individual parent task before it is considered for cancellation. Default is 30000"
+              },
+              "elapsed_time_millis_threshold": {
+                "minimum": 0,
+                "type": "integer",
+                "title": "The elapsed time threshold (in milliseconds) required for an individual parent task before it is considered for cancellation",
+                "description": "The elapsed time threshold (in milliseconds) required for an individual parent task before it is considered for cancellation. Default is 45000"
+              },
+              "heap_moving_average_window_size": {
+                "minimum": 0,
+                "type": "integer",
+                "title": "The window size used to calculate the rolling average of the heap usage for the completed parent tasks",
+                "description": "The window size used to calculate the rolling average of the heap usage for the completed parent tasks. Default is 10"
+              },
+              "heap_percent_threshold": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The heap usage threshold (as a percentage) required for an individual parent task before it is considered for cancellation",
+                "description": "The heap usage threshold (as a percentage) required for an individual parent task before it is considered for cancellation. Default is 0.2"
+              },
+              "heap_variance": {
+                "minimum": 0,
+                "type": "number",
+                "title": "The heap usage variance required for an individual parent task before it is considered for cancellation",
+                "description": "The heap usage variance required for an individual parent task before it is considered for cancellation. A task is considered for cancellation when taskHeapUsage is greater than or equal to heapUsageMovingAverage * variance. Default is 2.0"
+              },
+              "total_heap_percent_threshold": {
+                "maximum": 1,
+                "minimum": 0,
+                "type": "number",
+                "title": "The heap usage threshold (as a percentage) required for the sum of heap usages of all search tasks before cancellation is applied",
+                "description": "The heap usage threshold (as a percentage) required for the sum of heap usages of all search tasks before cancellation is applied. Default is 0.5"
+              }
+            }
+          }
+        }
+      },
       "search_max_buckets": {
         "example": 10000,
         "maximum": 1000000,
@@ -1232,6 +1448,76 @@
         "title": "Service logging",
         "description": "Store logs for the service so that they are available in the HTTP API and console."
       },
+      "shard_indexing_pressure": {
+        "type": "object",
+        "title": "Shard indexing back pressure settings",
+        "properties": {
+          "enabled": {
+            "type": "boolean",
+            "title": "Enable or disable shard indexing backpressure",
+            "description": "Enable or disable shard indexing backpressure. Default is false"
+          },
+          "enforced": {
+            "type": "boolean",
+            "title": "Run shard indexing backpressure in shadow mode or enforced mode",
+            "description": "Run shard indexing backpressure in shadow mode or enforced mode.\n            In shadow mode (value set as false), shard indexing backpressure tracks all granular-level metrics,\n            but it doesn’t actually reject any indexing requests.\n            In enforced mode (value set as true),\n            shard indexing backpressure rejects any requests to the cluster that might cause a dip in its performance.\n            Default is false"
+          },
+          "operating_factor": {
+            "type": "object",
+            "title": "Operating factor",
+            "properties": {
+              "lower": {
+                "minimum": 0,
+                "type": "number",
+                "title": "Lower occupancy limit of the allocated quota of memory for the shard",
+                "description": "Specify the lower occupancy limit of the allocated quota of memory for the shard.\n                    If the total memory usage of a shard is below this limit,\n                    shard indexing backpressure decreases the current allocated memory for that shard.\n                    Default is 0.75"
+              },
+              "optimal": {
+                "minimum": 0,
+                "type": "number",
+                "title": "Optimal occupancy of the allocated quota of memory for the shard",
+                "description": "Specify the optimal occupancy of the allocated quota of memory for the shard.\n                    If the total memory usage of a shard is at this level,\n                    shard indexing backpressure doesn’t change the current allocated memory for that shard.\n                    Default is 0.85"
+              },
+              "upper": {
+                "minimum": 0,
+                "type": "number",
+                "title": "Upper occupancy limit of the allocated quota of memory for the shard",
+                "description": "Specify the upper occupancy limit of the allocated quota of memory for the shard.\n                    If the total memory usage of a shard is above this limit,\n                    shard indexing backpressure increases the current allocated memory for that shard.\n                    Default is 0.95"
+              }
+            }
+          },
+          "primary_parameter": {
+            "type": "object",
+            "title": "Primary parameter",
+            "properties": {
+              "node": {
+                "type": "object",
+                "title": "",
+                "properties": {
+                  "soft_limit": {
+                    "minimum": 0,
+                    "type": "number",
+                    "title": "Node soft limit",
+                    "description": "Define the percentage of the node-level memory\n                            threshold that acts as a soft indicator for strain on a node.\n                            Default is 0.7"
+                  }
+                }
+              },
+              "shard": {
+                "type": "object",
+                "title": "",
+                "properties": {
+                  "min_limit": {
+                    "minimum": 0,
+                    "type": "number",
+                    "title": "Shard min limit",
+                    "description": "Specify the minimum assigned quota for a new shard in any role (coordinator, primary, or replica).\n                            Shard indexing backpressure increases or decreases this allocated quota based on the inflow of traffic for the shard.\n                            Default is 0.001"
+                  }
+                }
+              }
+            }
+          }
+        }
+      },
       "thread_pool_analyze_queue_size": {
         "maximum": 2000,
         "minimum": 10,
@@ -1996,6 +2282,26 @@
         "title": "Automatic utility network IP Filter",
         "description": "Automatically allow connections from servers in the utility network within the same zone"
       },
+      "backup_hour": {
+        "example": 3,
+        "maximum": 23,
+        "minimum": 0,
+        "type": [
+          "integer",
+          "null"
+        ],
+        "title": "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed."
+      },
+      "backup_minute": {
+        "example": 30,
+        "maximum": 59,
+        "minimum": 0,
+        "type": [
+          "integer",
+          "null"
+        ],
+        "title": "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed."
+      },
       "ip_filter": {
         "default": [],
         "type": "array",
@@ -2153,7 +2459,7 @@
       },
       "redis_timeout": {
         "default": 300,
-        "maximum": 31536000,
+        "maximum": 2073600,
         "minimum": 0,
         "type": "integer",
         "title": "Redis idle connection timeout in seconds"
@@ -2178,5 +2484,206 @@
         "description": "Store logs for the service so that they are available in the HTTP API and console."
       }
     }
+  },
+  "valkey": {
+    "properties": {
+      "automatic_utility_network_ip_filter": {
+        "default": true,
+        "type": "boolean",
+        "title": "Automatic utility network IP Filter",
+        "description": "Automatically allow connections from servers in the utility network within the same zone"
+      },
+      "backup_hour": {
+        "example": 3,
+        "maximum": 23,
+        "minimum": 0,
+        "type": [
+          "integer",
+          "null"
+        ],
+        "title": "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed."
+      },
+      "backup_minute": {
+        "example": 30,
+        "maximum": 59,
+        "minimum": 0,
+        "type": [
+          "integer",
+          "null"
+        ],
+        "title": "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed."
+      },
+      "ip_filter": {
+        "default": [],
+        "type": "array",
+        "title": "IP filter",
+        "description": "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'"
+      },
+      "migration": {
+        "type": [
+          "object",
+          "null"
+        ],
+        "title": "Migrate data from existing server",
+        "properties": {
+          "dbname": {
+            "maxLength": 63,
+            "type": "string",
+            "title": "Database name for bootstrapping the initial connection"
+          },
+          "host": {
+            "maxLength": 255,
+            "type": "string",
+            "title": "Hostname or IP address of the server where to migrate data from"
+          },
+          "ignore_dbs": {
+            "maxLength": 2048,
+            "type": "string",
+            "title": "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment)"
+          },
+          "ignore_roles": {
+            "maxLength": 2048,
+            "type": "string",
+            "title": "Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment)"
+          },
+          "method": {
+            "type": "string",
+            "title": "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types)",
+            "enum": [
+              "dump",
+              "replication"
+            ]
+          },
+          "password": {
+            "maxLength": 256,
+            "type": "string",
+            "title": "Password for authentication with the server where to migrate data from"
+          },
+          "port": {
+            "maximum": 65535,
+            "minimum": 1,
+            "type": "integer",
+            "title": "Port number of the server where to migrate data from"
+          },
+          "ssl": {
+            "type": "boolean",
+            "title": "The server where to migrate data from is secured with SSL"
+          },
+          "username": {
+            "maxLength": 256,
+            "type": "string",
+            "title": "User name for authentication with the server where to migrate data from"
+          }
+        }
+      },
+      "public_access": {
+        "default": false,
+        "type": "boolean",
+        "title": "Public Access",
+        "description": "Allow access to the service from the public Internet"
+      },
+      "service_log": {
+        "example": true,
+        "type": [
+          "boolean",
+          "null"
+        ],
+        "title": "Service logging",
+        "description": "Store logs for the service so that they are available in the HTTP API and console."
+      },
+      "valkey_acl_channels_default": {
+        "type": "string",
+        "title": "Default ACL for pub/sub channels used when a Valkey user is created",
+        "description": "Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Valkey configuration acl-pubsub-default.",
+        "enum": [
+          "allchannels",
+          "resetchannels"
+        ]
+      },
+      "valkey_io_threads": {
+        "example": 1,
+        "maximum": 32,
+        "minimum": 1,
+        "type": "integer",
+        "title": "Valkey IO thread count",
+        "description": "Set Valkey IO thread count. Changing this will cause a restart of the Valkey service."
+      },
+      "valkey_lfu_decay_time": {
+        "default": 1,
+        "maximum": 120,
+        "minimum": 1,
+        "type": "integer",
+        "title": "LFU maxmemory-policy counter decay time in minutes"
+      },
+      "valkey_lfu_log_factor": {
+        "default": 10,
+        "maximum": 100,
+        "minimum": 0,
+        "type": "integer",
+        "title": "Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies"
+      },
+      "valkey_maxmemory_policy": {
+        "default": "noeviction",
+        "type": [
+          "string",
+          "null"
+        ],
+        "title": "Valkey maxmemory-policy",
+        "enum": [
+          "noeviction",
+          "allkeys-lru",
+          "volatile-lru",
+          "allkeys-random",
+          "volatile-random",
+          "volatile-ttl",
+          "volatile-lfu",
+          "allkeys-lfu"
+        ]
+      },
+      "valkey_notify_keyspace_events": {
+        "default": "",
+        "maxLength": 32,
+        "pattern": "^[KEg\\$lshzxentdmA]*$",
+        "type": "string",
+        "title": "Set notify-keyspace-events option"
+      },
+      "valkey_number_of_databases": {
+        "example": 16,
+        "maximum": 128,
+        "minimum": 1,
+        "type": "integer",
+        "title": "Number of Valkey databases",
+        "description": "Set number of Valkey databases. Changing this will cause a restart of the Valkey service."
+      },
+      "valkey_persistence": {
+        "type": "string",
+        "title": "Valkey persistence",
+        "description": "When persistence is 'rdb', Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.",
+        "enum": [
+          "off",
+          "rdb"
+        ]
+      },
+      "valkey_pubsub_client_output_buffer_limit": {
+        "example": 64,
+        "maximum": 512,
+        "minimum": 32,
+        "type": "integer",
+        "title": "Pub/sub client output buffer hard limit in MB",
+        "description": "Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan."
+      },
+      "valkey_ssl": {
+        "default": true,
+        "type": "boolean",
+        "title": "Require SSL to access Valkey"
+      },
+      "valkey_timeout": {
+        "default": 300,
+        "maximum": 2073600,
+        "minimum": 0,
+        "type": "integer",
+        "title": "Valkey idle connection timeout in seconds"
+      }
+    }
   }
 }
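
For reference, here is a minimal sketch of user-config payloads that exercise a few of the fields added above. The property names, enum values, and numeric ranges are taken from the schema in this diff; the payload shape and the concrete values are illustrative assumptions only and are not part of the generated file or of this PR.

A MySQL user config using the new log_output option together with the existing slow-query settings:

    {
      "slow_query_log": true,
      "long_query_time": 10,
      "log_output": "INSIGHTS,TABLE"
    }

A Valkey user config drawing on the newly added valkey service block:

    {
      "backup_hour": 3,
      "backup_minute": 30,
      "valkey_maxmemory_policy": "allkeys-lru",
      "valkey_persistence": "rdb",
      "valkey_timeout": 300
    }

Each value stays within the example, minimum/maximum, and enum constraints declared above; how such objects are actually submitted to the service API is outside the scope of this generated-data change.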

github-actions[bot] requested a review from a team as a code owner on August 21, 2024 02:43
github-actions[bot] force-pushed the chore/make-generate branch 2 times, most recently from 20962e7 to 2058a9b on September 6, 2024 02:47
github-actions[bot] force-pushed the chore/make-generate branch 3 times, most recently from 96f5553 to ff40937 on September 25, 2024 02:50
github-actions[bot] force-pushed the chore/make-generate branch 2 times, most recently from 9a68bc5 to 02b30a2 on September 28, 2024 02:48
github-actions[bot] force-pushed the chore/make-generate branch 4 times, most recently from 35945a5 to 84f953c on October 10, 2024 02:48
github-actions[bot] force-pushed the chore/make-generate branch 3 times, most recently from d50c472 to 7f25e92 on October 17, 2024 02:49