diff --git a/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx b/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx index b64ded13bca..e40a26f6a21 100644 --- a/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx +++ b/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx @@ -59,7 +59,7 @@ On the Set Up Identity Provider page: For example: - ![](../images/nameID.png) + ![](images/nameID.png) 3. In the **SAML settings** section, enter the configuration information for your preferred SAML identity provider: | Field | Description | diff --git a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx index bef88723b51..bd0f32dbe2a 100644 --- a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx +++ b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx @@ -33,7 +33,7 @@ The command options that work with Migration Toolkit are grouped by their behavi !!!note - If you are using several options for a migration, or have to specify a long list of objects for an option, consider using the `-optionsFile` option to specify the values in a separate text file. See [Specifying options using a file](mtk_command_options_in_file) for more information. + If you're using several options for a migration or have to specify a long list of objects for an option, consider using the `-optionsFile` option to specify the values in a separate text file. See [Specifying options using a file](mtk_command_options_in_file) for more information. !!! ## Offline migration options diff --git a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/creating_txt_file.mdx b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/creating_txt_file.mdx index 9dd43c71e54..669c2ef1bfc 100644 --- a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/creating_txt_file.mdx +++ b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/creating_txt_file.mdx @@ -4,19 +4,19 @@ navTitle: "Creating the options file" deepToC: true --- -To specify a text file that contains command line options and values, you must create a `` file with the options you want to execute, and then run the `runMTK.sh` command with the `-optionsFile ` argument. +To specify a text file that contains command line options and values, create an `` file with the options you want to execute. Then run the `runMTK.sh` command with the `-optionsFile ` argument. !!!note - You can store the `` file in the same folder as the MTK utility, or in a directory of your choice. + You can store the `` file in the same folder as the Migration Toolkit utility or in a directory of your choice. !!! ## Creating the text file - guidelines -Create a text file on a location that is accessible by the user performing the migration. Then, add an option per line following these guidelines: +Create a text file in a location that's accessible by the user performing the migration. Then, add one option per line following these guidelines. ### Skip the dash -Use the same option syntax as in the command line options without the dash sign (-). The options file recognizes each line as a new parameter and doesn't require a dash as a delimiter. 
+Use the same option syntax as in the command line options without the dash (-). The options file recognizes each line as a new parameter and doesn't require a dash as a delimiter. | Option in CLI | Supported syntax for the option in file | |----------------|-----------------------------------------| @@ -24,19 +24,19 @@ Use the same option syntax as in the command line options without the dash sign ### Use a line per option -Use a new line for each option. Use a line per key-value pair. +Use a new line for each option. Use one line per key-value pair. | Option in CLI | Supported syntax for the option in file | |----------------------------------------------------------------|---------------------------------------------------------------| | `-tables hr -allViews`
`-excludeViews all_emp,acct_list` | tables hr
allViews
excludeViews all_emp,acct_list | -### Specify values for an option in a single or multiple lines +### Specify values for an option in a single line or multiple lines You can add a single line with an option and multiple values for that option. You can add several lines for the same option, each time with a different value. -And you can combine both approaches, because MTK will combine all lines that have the same option. +And you can combine both approaches, because Migration Toolkit combines all lines that specify the same option. | Option in CLI | Supported syntax for the option in file | |--------------------------|-------------------------------------------------| @@ -45,64 +45,70 @@ And you can combine both approaches, because MTK will combine all lines that hav | | tables TAB1
tables TAB2,TAB3 | !!!note - The duplication of options is only supported for the options file.
- When using `-tables` in the command line more than one time, only the last parameter is executed. For example, `./runMTK.sh -tables TAB2 -tables TAB3` only includes the _TAB3_ table in the migration. + Duplicating options is supported only for the options file.
+ When using `-tables` at the command line more than one time, only the last parameter is executed. For example, `./runMTK.sh -tables TAB2 -tables TAB3` includes only the `TAB3` table in the migration. !!! ### Use a space or equal sign to provide values -For options that require values (key-value pairs), separate the option from the value with a space or an equal sign (=). Both options are supported. +For options that require values (key-value pairs), separate the option from the value with a space or an equal sign (=). Both syntaxes are supported. | Option in CLI | Supported syntax for the option in file | |------------------|-----------------------------------------| | `-views all_emp` | views=all_emp | | | views all_emp | -### Don't specify an option in the file and command line +### Don't specify an option in the file and at the command line -Don't include an option in the options file if you're specifying it as a flag in the command line. +Don't include an option in the options file if you're specifying it as a flag at the command line. -Specifying an option in both the command line and in the text file will cause the migration to fail. +Specifying an option both at the command line and in the text file causes the migration to fail. ## Order of processing -Migration Toolkit reads command line options and option files in the order you provide them when running the command. +Migration Toolkit reads command line options and option files in the order you provide them when running the command. Ensure you add the [schema scope](executing_migration_with_txt/#provide-the-scope-for-the-schema-migration) (`schema_name` or `-allSchemas`) as the last parameter at the command line. -For example, if you run the following command, MTK first recognizes the `-sourcedbtype oracle` option, then reads the contents of `example.options` in order from top to bottom. Last parameter is always `schema_name`. +For example, if you run the following command, Migration Toolkit first recognizes the `-sourcedbtype oracle` option, and then reads the contents of `options_textfile` in order from top to bottom. The last parameter is the schema scope (`` or `-allSchemas`). ```shell -runMTK.sh -sourcedbtype oracle -optionsFile example.options schema_name +runMTK.sh -sourcedbtype oracle -optionsFile options_textfile schema_name ``` -If you want an option to be executed last, you can either put it at the end of the `example.options` file and add no further options after `-optionsFile` in the command line. +Using an options file means that you can employ different syntaxes to perform a migration where parameters are executed in the same way. The following alternatives perform the same migration. -Schema definition (`schema_name` or `-allSchemas`) must always be the last parameter on the command line. +**Alternative 1** ```shell -runMTK.sh -sourcedbtype oracle -optionsFile example.options schema_name +runMTK.sh -sourcedbtype oracle -optionsFile ``` -Where the content of the `example.options` file is: +Where the content of the `` file is: ``` tables=TAB1 dataOnly ``` -Or you can place the options flag (-dataOnly in this case) in the command line after specifying the `-optionsFile`: +**Alternative 2** ```shell -runMTK.sh -sourcedbtype oracle -optionsFile example.options -dataOnly schema_name +runMTK.sh -sourcedbtype oracle -optionsFile -dataOnly +``` + +Where the content of the `` file is: + +``` +tables=TAB1 ``` ## Other considerations -- **Commenting out**: Use a pound sign (#) to comment out a line. 
MTK won’t execute that line. +- **Commenting out** — Use a pound sign (#) to comment out a line. Migration Toolkit doesn't execute lines that are commented out. -- **Limitations**: Do not use quote marks. +- **Limitations** — Don't use quote marks. -- **Space processing**: MTK removes spaces between an option and its value. When you separate an option from its value with a space, only one space is treated as a delimiter. +- **Space processing** — Migration Toolkit removes spaces between an option and its value. When you separate an option from its value with a space, only one space is treated as a delimiter. - For example, to run the `-views all_emp, mgmt_list, acct_list` command line option, you can add _views all_emp,mgmt_list,acct_list_ OR _views = all_emp,mgmt_list,acct_list_ in the options file. + For example, to run the `-views all_emp, mgmt_list, acct_list` command line option, you can add `views all_emp,mgmt_list,acct_list` or `views = all_emp,mgmt_list,acct_list` in the options file. -After you have created the options file, [execute the migration with the options file](executing_migration_with_txt). +After you create the options file, [execute the migration with it](executing_migration_with_txt). diff --git a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/executing_migration_with_txt.mdx b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/executing_migration_with_txt.mdx index 323f0562ec3..0c9d94eb0b2 100644 --- a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/executing_migration_with_txt.mdx +++ b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/executing_migration_with_txt.mdx @@ -4,31 +4,31 @@ navTitle: "Executing a migration with the options file" deepToC: true --- -After you have created the options file, execute the migration command by referencing the options file: +After you create the options file, reference it when executing the migration command: ```shell -./runMTK.sh -optionsFile +./runMTK.sh -optionsFile ``` !!!note - Provide the full path for `` if you are running the MTK command from a different folder than where you have stored the file. + Provide the full path for `` if you're running the Migration Toolkit command from a different folder from where you stored the file. !!! -You can specify all options in the file, or use a mix of command line and options file to specify the migration parameters. +You can specify all options in the file or use a mix of the command line and the options file to specify the migration parameters. ## Provide the scope for the schema migration -You must specify the scope of the schemas to be migrated. +Specify the scope of the schemas to migrate: -- If you want to migrate all schemas: Add the `-allSchemas` option in the command line or add `allSchemas` in your options file. Regardless of which method you use, this option must be the last parameter. +- If you want to migrate all schemas, add the `-allSchemas` option at the command line or add `allSchemas` in your options file. For both methods, this option must be the last parameter. -- If you want to specify one or several schemas: Specify the schemas you want to migrate in the command line with no preceding option. They have to be the last parameter in the command line. +- If you want to specify a subset of schemas, specify the schemas you want to migrate at the command line with no preceding option and as a comma-separated list. 
Schema specifications must be the last parameter at the command line. -Here are some examples for specifying all options in the file: +Here are some examples for specifying all options in the file. ## Migrate a schema with specific tables -Content of the `` text file: +Content of the `options_textfile` text file: ``` tables comp_schema.emp,comp_schema.dept,finance_schema.acctg @@ -37,7 +37,7 @@ tables comp_schema.emp,comp_schema.dept,finance_schema.acctg Syntax of the migration command: ```shell -./runMTK.sh -optionsFile example.options schema_name +./runMTK.sh -optionsFile options_textfile schema_name ``` Command line equivalent: @@ -46,15 +46,15 @@ Command line equivalent: ./runMTK.sh -tables comp_schema.emp,comp_schema.dept,finance_schema.acctg schema_name ``` -## Use Options File to exclude tables and include functions +## Use options file to exclude tables and include functions -The options file can be used to separate table names from configuration options. -It may also contain comments to clarify why parameters are used. +You can use the options file to separate table names from configuration options. +You can also use comments to clarify why parameters are used. -In this example we are excluding tables and including functions. -It shows some extra comments, to explain why tables are excluded. +This example excludes tables and includes functions. +It shows some comments to explain why tables are excluded. -Content of the `` text file: +Content of the `excludeInclude.options` text file: ``` # finances @@ -84,7 +84,7 @@ Command line equivalent: Run an offline migration with all schemas. -Content of the `` text file: +Content of the `options_textfile` text file: ``` offlineMigration file_dest @@ -95,7 +95,7 @@ schemaOnly Syntax of the migration command: ```shell -./runMTK.sh -optionsFile example.options -allSchemas +./runMTK.sh -optionsFile options_textfile -allSchemas ``` Command line equivalent: diff --git a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/index.mdx b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/index.mdx index 29b6a5513b2..ae74cad94cb 100644 --- a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/index.mdx +++ b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/mtk_command_options_in_file/index.mdx @@ -4,15 +4,13 @@ navTitle: "Specifying options using a file" deepToC: true --- -EDB supports using a text file to specify several [database migration options](../08_mtk_command_options.mdx) instead of manually entering them in the command line. This is particularly helpful when: +EDB supports using a text file to specify several [database migration options](../08_mtk_command_options.mdx) instead of manually entering them at the command line. This is particularly helpful when: -- You have to specify a large number of command line options for complex migration scenarios. Entering all options in the command line becomes troublesome, or you might have even reached command line length limits. +- You have to specify a large number of command line options or a large number of values for complex migration scenarios. Entering all options or values at the command line becomes cumbersome, or you might reach command line length limits. -- You have to specify a large number of values for a command line option for complex migration scenarios. Entering all values in the command line becomes troublesome, or you might have even reached command line length limits. 
+- You want to apply the same migration options and values to several different databases, or you might want to automate database migration. Running `runMTK.sh` with several options and values repeatedly at the command line becomes prone to errors. -- You want to apply the same migration options and values to several different databases, or you might even consider automating the migration of databases. Running `runMTK.sh` with several options and values repeatedly in the command line becomes prone to errors. - -The `-optionsFile ` argument allows you to reference a text file with all options and values, making the migration process easier. +The `-optionsFile ` argument allows you to reference a text file with options and values, making the migration process easier. ## Execute a migration using an options file @@ -21,4 +19,3 @@ To perform a migration with a file that specifies the command options: 1. [Create a text file with the command options](creating_txt_file). 1. [Perform a migration by invoking the file with the command options](executing_migration_with_txt). - diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx index 39c05d55cc9..fca3b2256ca 100644 --- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx +++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx @@ -3,36 +3,50 @@ title: "Configuring PgBouncer" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/pgbouncer-configuration-guide/8.0/configuring_pgBouncer.html" +deepToC: true redirects: - /pem/latest/pem_pgbouncer/03_configuring_pgBouncer/ - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/03_pem_pgbouncer_configuring_pgbouncer/ --- -You must configure PgBouncer to work with the PEM database server. +You must configure PgBouncer to work with the PEM database server. -The name and location of the directories and files in the configuration steps depend on whether you installed the community version of PgBouncer or EDB PgBouncer. The instructions assume you are using EDB PgBouncer. If you have installed PgBouncer from the community repo, replace the names of the files and directories in the example with the PgBouncer values. +## Prerequisites +- If you are running EDB Postgres Advanced Server, you have installed [EDB PgBouncer](/pgbouncer/latest/installing). -| Name | PgBouncer | EDB PgBouncer | -|---------------------|------------------------------------|----------------------------------------| -| PgBouncer directory | `/etc/pgbouncer<1.x>` | `/etc/edb/pgbouncer<1.x>` | -| ini file | `pgbouncer.ini` | `edb-pgbouncer.ini` | -| userlist file | `/etc/pgbouncer<1.x>/userlist.txt` | `/etc/edb/pgbouncer<1.x>/userlist.txt` | -| HBA file | `(/etc/pgbouncer<1.x>/hba_file)` | `(/etc/edb/pgbouncer<1.x>/hba_file)` | -| Service file | `pgbouncer-<1.x>` | `edb-pgbouncer-<1.x>` | + Or -This example runs PgBouncer as the enterprisedb system user and outlines the process of configuring PgBouncer. +- If you are running EDB Postgres Extended Server or PostgreSQL, you have installed community [PgBouncer](https://www.pgbouncer.org/install.html). 
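+
+For example, you can check which of the two packages is installed before you continue. This is a minimal sketch, assuming a RHEL-like system where packages are managed with RPM; the exact package names on your system may differ from the `pgbouncer` and `edb-pgbouncer<1.x>` patterns used in this topic:
+
+```shell
+# List any installed EDB PgBouncer packages (names follow the edb-pgbouncer<1.x> pattern)
+rpm -qa | grep edb-pgbouncer
+# List any installed community PgBouncer packages
+rpm -qa | grep '^pgbouncer'
+```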
+ +### EDB PgBouncer and PgBouncer installation considerations + +The name and location of the directories and files in the configuration steps, as well as the user, depend on whether you installed the community version of PgBouncer or EDB PgBouncer. If you have installed community PgBouncer (whether you install it from the community repo or the EDB repo), replace the names of the files and directories in the worked example with the values for PgBouncer. + +| Name | PgBouncer | EDB PgBouncer | +|---------------------|---------------------------|------------------------------------| +| PgBouncer directory | `/etc/pgbouncer` | `/etc/edb/pgbouncer<1.x>` | +| ini file | `pgbouncer.ini` | `edb-pgbouncer.ini` | +| HBA file | `/etc/pgbouncer/hba_file` | `/etc/edb/pgbouncer<1.x>/hba_file` | +| Service file | `pgbouncer` | `edb-pgbouncer-<1.x>` | +| User | `postgres` | `enterprisedb` | + +## Configuring PgBouncer + +This example configures EDB PgBouncer with the `enterprisedb` system user. + +If you are running community PgBouncer, replace the names of the directories, files and user as explained in [Location of PgBouncer directories](#edb-pgbouncer-and-pgbouncer-installation-considerations) . 1. Open a terminal window and navigate to the PgBouncer directory. -2. Change the owner of the `etc` directory for PgBouncer (where `pgbouncer.ini` resides) to `enterprisedb`, and change the directory permissions to `0700`: +1. Change the owner of the `etc` directory for PgBouncer (where `edb-pgbouncer.ini` resides) to `enterprisedb`, and change the directory permissions to `0700`: ```shell - $ chown -R enterprisedb:enterprisedb /etc/edb/pgbouncer<1.x> - $ chmod 0700 /etc/edb/pgbouncer<1.x> + chown -R enterprisedb:enterprisedb /etc/edb/pgbouncer<1.x> + chmod 0700 /etc/edb/pgbouncer<1.x> ``` -3. Change the contents of the `pgbouncer.ini` or `edb-pgbouncer.ini` file: +1. Change the contents of the `edb-pgbouncer.ini` file: ```ini [databases] @@ -40,11 +54,8 @@ This example runs PgBouncer as the enterprisedb system user and outlines the pro ;; to the PEM database server as required. ;; 'auth_user' will be used for authenticate the db user (proxy ;; agent user in our case) - - pem = port=5444 host=localhost dbname=pem auth_user=pgbouncer - pool_size=80 pool_mode=transaction - * = port=5444 host=localhost dbname=pem auth_user=pgbouncer - pool_size=10 + pem = port=5444 host=127.0.0.1 dbname=pem auth_user=pgbouncer pool_size=80 pool_mode=transaction + * = port=5444 host=127.0.0.1 dbname=pem auth_user=pgbouncer pool_size=10 [pgbouncer] logfile = /var/log/edb/pgbouncer<1.x>/edb-pgbouncer-<1.x>.log @@ -52,20 +63,21 @@ This example runs PgBouncer as the enterprisedb system user and outlines the pro listen_addr = * ;; Agent needs to use this port to connect the pem database now listen_port = 6432 - ;; Require to support for the SSL Certificate authentications + ;; Set to require to ensure SSL certificates are used for connections ;; for PEM Agents client_tls_sslmode = require ;; These are the root.crt, server.key, server.crt files present ;; in the present under the data directory of the PEM database ;; server, used by the PEM Agents for connections. 
- client_tls_ca_file = /var/lib/edb/as11/data/root.crt - client_tls_key_file = /var/lib/edb/as11/data/server.key - client_tls_cert_file = /var/lib/edb/as11/data/server.crt + client_tls_ca_file = /var/lib/edb/as16/data/root.crt + client_tls_key_file = /var/lib/edb/as16/data/server.key + client_tls_cert_file = /var/lib/edb/as16/data/server.crt + ;; Allow pgBouncer to use pem_agent_pool certificate + ;; and key for connections to the server. + server_tls_key_file = /var/lib/edb/.postgresql/pem_agent_pool.key + server_tls_cert_file = /var/lib/edb/.postgresql/pem_agent_pool.crt ;; Use hba file for client connections auth_type = hba - ;; Authentication file, Reference: - ;; https://pgbouncer.github.io/config.html#auth_file - auth_file = /etc/edb/pgbouncer<1.x>/userlist.txt ;; HBA file auth_hba_file = /etc/edb/pgbouncer<1.x>/hba_file ;; Use pem.get_agent_pool_auth(TEXT) function to authenticate @@ -73,6 +85,10 @@ This example runs PgBouncer as the enterprisedb system user and outlines the pro auth_query = SELECT * FROM pem.get_agent_pool_auth($1) ;; DB User for administration of the pgbouncer admin_users = pem_admin1 + ;; auth_dbname and auth_user allow + ;; admin console login by admin_users and stats_users + auth_dbname = pem + auth_user = pgbouncer ;; DB User for collecting the statistics of pgbouncer stats_users = pem_admin1 server_reset_query = DISCARD ALL @@ -83,64 +99,35 @@ This example runs PgBouncer as the enterprisedb system user and outlines the pro server_idle_timeout = 60 ``` -!!! Note - For more information on `auth_user` see [Authentication settings](https://www.pgbouncer.org/config.html#authentication-settings). - -4. Create and update the `/etc/edb/pgbouncer<1.x>/userlist.txt` authentication file for PgBouncer: - - ```sql - ## Connect to pem database as a superuser, - ## create the userslist.txt file and add - ## username and their password list in CSV format - $ psql -p 5444 -d pem -U enterprisedb - - pem=# - COPY ( - SELECT 'pgbouncer'::TEXT, 'pgbouncer_password' - UNION ALL - SELECT 'pem_admin1'::TEXT, 'pem_admin1_password') - TO '/etc/edb/pgbouncer<1.x>/userlist.txt' - WITH (FORMAT CSV, DELIMITER ' ', FORCE_QUOTE *); - __OUTPUT__ - COPY 2 - ``` - - This creates `/etc/edb/pgbouncer<1.x>/userlist.txt` file and adds the username and password list. + !!!note + For more information on `auth_user` see [Authentication settings](https://www.pgbouncer.org/config.html#authentication-settings). + !!! - !!! Note - If the pem_admin user is a superuser, you must add the password to the authentication file (`enterprisedb` in the example). This allows the pem_admin user to invoke the PEM authentication query function `pem.get_proxy_auth(text)`. - -5. Create an HBA file `(/etc/edb/pgbouncer<1.x>/hba_file)` for PgBouncer that contains the following content: +1. Create an HBA file `(/etc/edb/pgbouncer<1.x>/hba_file)` for PgBouncer that contains the following content: ```ini - # Use authentication method md5 for the local connections to - # connect pem database & pgbouncer (virtual) database. - local pgbouncer all md5 - # Use authentication method md5 for the remote connections to - # connect to pgbouncer (virtual database) using enterprisedb - # user. - - host pgbouncer,pem pem_admin1 0.0.0.0/0 md5 - ``` - - ```ini - # Use authentication method cert for the TCP/IP connections to - # connect the pem database using pem_agent_user1 - + # Use the authentication method scram-sha-256 for local connections + # between the pem database & the pgbouncer (virtual) database. 
+ local pgbouncer all scram-sha-256 + # Use the authentication method scram-sha-256 for remote connections + # to pgbouncer (virtual database) using the enterprisedb user. + host pgbouncer,pem pem_admin1 0.0.0.0/0 scram-sha-256 + # Use the authentication method cert for TCP/IP connections + # to the pem database using pem_agent_user1 hostssl pem pem_agent_user1 0.0.0.0/0 cert ``` -6. Change the owner of the HBA file `(/etc/edb/pgbouncer<1.x>/hba_file)` to `enterprisedb`, and change the directory permissions to `0600`: +1. Change the owner of the HBA file `(/etc/edb/pgbouncer<1.x>/hba_file)` to `enterprisedb`, and change the directory permissions to `0600`: ```shell - $ chown enterprisedb:enterprisedb /etc/edb/pgbouncer<1.x>/hba_file - $ chmod 0600 /etc/edb/pgbouncer<1.x>/hba_file + chown enterprisedb:enterprisedb /etc/edb/pgbouncer<1.x>/hba_file + chmod 0600 /etc/edb/pgbouncer<1.x>/hba_file ``` -7. Enable the PgBouncer service, and start the service: +1. Enable the PgBouncer service, and start the service: ```shell - $ systemctl enable edb-pgbouncer-<1.x> + systemctl enable edb-pgbouncer-<1.x> __OUTPUT__ Created symlink from /etc/systemd/system/multi-user.target.wants/edb-pgbouncer-<1.x>.service @@ -148,5 +135,5 @@ This example runs PgBouncer as the enterprisedb system user and outlines the pro ``` ```shell - $ systemctl start edb-pgbouncer-<1.x> + systemctl start edb-pgbouncer-<1.x> ``` diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx index ca24ea8aec0..9f234f89268 100644 --- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx +++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx @@ -8,16 +8,30 @@ redirects: - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/04_pem_pgbouncer_configuring_pem_agent/ --- -You can use an RPM package to install a PEM agent. For detailed installation information, see [Installating the PEM agent](../../installing_pem_agent/). +## Prerequisites -Don't configure the PEM agent responsible for sending SNMP notifications with pgBouncer. For example, if the default PEM agent installed with PEM server is used for SNMP notifications, don't configure it with pgBouncer. +You have [installed the PEM agent](../../installing_pem_agent/). -## Configuring a new PEM agent (installed via RPM) +!!!note + Do not configure PEM agents with `enable_smtp`, `enable_snmp`, or `enable_webhook` set to `true` in the `agent.cfg` file to connect through PgBouncer. SNMP, SMTP, and Webhook spoolers use the LISTEN/NOTIFY mechanism provided by Postgres to send notifications asynchronously. Since PgBouncer doesn’t support the LISTEN/NOTIFY mechanism in transaction mode, connecting the agent to PgBouncer can cause notifications to be delayed or not delivered at all. Instead, connect the PEM agent directly to the PEM backend database. +!!! -After using an RPM package to install the PEM agent, you must configure it to work against a particular PEM database server. Use the following command: +Now you can choose to [configure a new PEM agent](#configuring-a-new-pem-agent) or [use an existing PEM agent](#configuring-an-existing-pem-agent) for PgBouncer. + +## Configuring a new PEM agent + +After installing the PEM agent, configure it to work with a particular PEM database server. 
Use the following command: ```shell -$ PGSSLMODE=require PEM_SERVER_PASSWORD=pem_admin1_password /usr/edb/pem/agent/bin/pemworker --register-agent --pem-server 172.16.254.22 --pem-port 6432 --pem-user pem_admin1 --pem-agent-user pem_agent_user1 --display-name *Agent_Name* +PGSSLMODE=require PEM_SERVER_PASSWORD=pem_admin1_password \ + /usr/edb/pem/agent/bin/pemworker \ + --register-agent \ + --pem-server 172.16.254.22 \ + --pem-port 6432 \ + --pem-user pem_admin1 \ + --pem-agent-user pem_agent_user1 \ + --display-name *Agent_Name* \ +__OUTPUT__ Postgres Enterprise Manager Agent registered successfully! ``` @@ -36,7 +50,7 @@ The PEM agent uses the keys to connect to the PEM database server as pem_agent_u A line mentioning the agent-user to use appears in the `agent.cfg` configuration file. For example: ```ini -$ cat /usr/edb/pem/agent/etc/agent.cfg +cat /usr/edb/pem/agent/etc/agent.cfg [PEM/agent] pem_host=172.16.254.22 pem_port=6432 @@ -64,30 +78,30 @@ allow_batch_probes=false heartbeat_connection=false ``` -## Configuring an existing PEM agent (installed via RPM) +## Configuring an existing PEM agent If you're using an existing PEM agent, you can copy the SSL certificate and key files to the target machine and reuse the files. You must modify the files, adding a new parameter and replacing some parameters in the existing `agent.cfg` file. -Add a line to use agent_user as the agent: +1. Add a line to use agent_user as the agent: -```ini -agent_user=pem_agent_user1 -``` + ```ini + agent_user=pem_agent_user1 + ``` -Update the port to specify the pgBouncer port: +1. Update the port to specify the PgBouncer port: -```ini -pem_port=6432 -``` + ```ini + pem_port=6432 + ``` -Update the certificate and key path locations: +1. Update the certificate and key path locations: -```ini -agent_ssl_key=/root/.pem/pem_agent_user1.key -agent_ssl_crt=/root/.pem/pem_agent_user1.crt -``` + ```ini + agent_ssl_key=/root/.pem/pem_agent_user1.key + agent_ssl_crt=/root/.pem/pem_agent_user1.crt + ``` -As an alternative, you can run the agent self-registration script. However, that process creates a new agent id. If run the agent self-registration script, you must replace the new agent id with the existing id and disable the entry for the new agent id in the `pem.agent` table. For example: +As an alternative, you can run the agent self-registration script. However, that process creates a new agent id. If you run the agent self-registration script, you must replace the new agent id with the existing id and disable the entry for the new agent id in the `pem.agent` table. For example: ```sql pem=# UPDATE pem.agent SET active = false WHERE id = ; diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx index d6a460e5bd7..ecdd5a57885 100644 --- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx +++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx @@ -1,5 +1,5 @@ --- -title: "Connection pooling using pgBouncer" +title: "Connection pooling using PgBouncer" navTitle: "Deploying connection pooling" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
@@ -17,10 +17,11 @@ navigation:
   - configuring_the_pem_agent
 ---
 
-You can use pgBouncer as a connection pooler for limiting the number of connections from the PEM agent to the Postgres Enterprise Manager (PEM) server on non-Windows machine:
+You can use PgBouncer as a connection pooler for limiting the number of connections from the PEM agent to the Postgres Enterprise Manager (PEM) server on non-Windows machines:
 
-- Preparing the PEM Database Server provides information about preparing the PEM database server to be used with pgBouncer.
-- Configuring pgBouncer provides detailed information about configuring pgBouncer to make it work with the PEM database server.
-- Configuring the PEM agent provides detailed information about configuring a PEM agent to connect to pgBouncer.
+- [PEM server and agent connection management mechanism](pem_server_pem_agent_connection_management_mechanism) provides an introduction to the PgBouncer-PEM infrastructure.
+- [Preparing the PEM Database Server](preparing_the_pem_database_server) provides information about preparing the PEM database server to be used with PgBouncer.
+- [Configuring PgBouncer](configuring_pgBouncer) provides detailed information about configuring PgBouncer to allow it to work with the PEM database server.
+- [Configuring the PEM agent](configuring_the_pem_agent) provides detailed information about configuring a PEM agent to connect to PgBouncer.
 
-For detailed information about using the PEM web interface, see the [Accessing the web interface ](../../pem_web_interface).
\ No newline at end of file
+For detailed information about using the PEM web interface, see [Accessing the web interface](../../pem_web_interface).
\ No newline at end of file
diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx
index ff5a0a68569..80fe1f6630b 100644
--- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx
+++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx
@@ -8,17 +8,23 @@ redirects:
   - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/01_pem_pgbouncer_server_agent_connection/
 ---
 
-Each PEM agent connects to the PEM database server using the SSL certificates for each user. For example, an agent with `ID#1` connects to the PEM database server using the agent1 user.
+## Without PgBouncer
 
-![Connecting to the PEM database without pgBouncer](../../images/pem_database_without_pgbouncer.png)
+In the default configuration, each PEM agent connects to the PEM database server directly using SSL for encryption. Each PEM agent uses its own dedicated user for the connection.
 
-Prior to PEM version 7.5, the following limitations disallowed the use of the connection pooler between the PEM server and PEM agent:
+For example, an agent with ID 1 connects to the PEM database server as agent1.
 
-- The PEM agent uses an SSL certificate to connect to the PEM database server.
-- It uses an individual user identifier when connecting to the PEM database server.
+![Connecting to the PEM database without PgBouncer](../../images/pem_without_pgbouncer.png)
 
-EDB modified the PEM agent to allow the agent to use a common database user (instead of the dedicated agent users) to connect to the PEM database server.
+## With PgBouncer -![Connecting to pgBouncer.](../../images/pem_database_with_pgbouncer.png) +In a PgBouncer-enabled environment, PEM agents cannot connect to the PEM database server directly. PEM agents must use a proxy user that you configure specifically for the connection to PgBouncer. In the example, the proxy user handling all PEM agent connections is called `pem_agent_user1`. + +Once the PEM agents connect to PgBouncer using SSL, PgBouncer is responsible for managing connection requests to the PEM database server. PgBouncer uses the rules you have configured for connection pooling to manage the incoming connection requests, for example, by respecting the established maximum number of active connections. + +![Connecting to pgBouncer.](../../images/pem_with_pgbouncer.png) + +!!!note + Use PgBouncer version 1.9.0 or later as the connection pooler. Versions 1.9.0 or later support cert authentication. +!!! -We recommend using PgBouncer version 1.9.0 or later as the connection pooler. Versions 1.9.0 or later support cert authentication. PEM agents can connect to pgBouncer using SSL certificates. diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx index eac027e19e2..0ddeb82c21c 100644 --- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx +++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx @@ -8,30 +8,59 @@ redirects: - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/02_pem_pgbouncer_preparing_dbserver/ --- -You must configure the PEM database server to work with PgBouncer. This example shows how to configure the PEM database server. +You must configure dedicated users and create an SSL key and certificate on the PEM database server to enable connection pooling for PEM with PgBouncer. -1. Create a dedicated user named pgbouncer on the PEM database server: +This example shows how to prepare the PEM database server with the `enterprisedb` user on a RHEL-based operating system with EDB Postgres Advanced Server version 16. The location of your data, the configuration and key files, and the user you employ to perform the configuration may differ depending on your OS and Postgres distribution. + +## Prerequisites + +- You are connected to the `pem` database of the PEM database server. + +- You are connected as `enterprisedb` or `postgres` user. The user depends on your Postgres distribution. + + | Postgres distribution | User | + |------------------------------|--------------| + | EDB Postgres Advanced Server | enterprisedb | + | EDB Postgres Extended Server | postgres | + | PostgreSQL | postgres | + +## Creating users and roles for PgBouncer-PEM connections + +1. Create a dedicated user named pgbouncer with `pem_agent_pool` membership. This user will serve connections from PgBouncer to the PEM database by forwarding all agent database queries. ```sql - CREATE USER pgbouncer PASSWORD 'ANY_PASSWORD' LOGIN; + CREATE ROLE pgbouncer PASSWORD 'ANY_PASSWORD' LOGIN; __OUTPUT__ CREATE ROLE ``` -2. Create a user named pem_admin1 (not a superuser) with `pem_admin` and `pem_agent_pool role` membership on the PEM database server: + ```sql + GRANT pem_agent_pool TO pgbouncer; + __OUTPUT__ + GRANT ROLE + ``` + +1. Create a user named pem_admin1 (not a superuser) with `pem_admin` and `pem_agent_pool` role membership. 
This user is used to register the agent to the PEM server and manage access to the PEM database. ```sql - CREATE USER pem_admin1 PASSWORD 'ANY_PASSWORD' LOGIN CREATEROLE; + CREATE ROLE pem_admin1 PASSWORD 'ANY_PASSWORD' LOGIN CREATEROLE; __OUTPUT__ CREATE ROLE ``` + ```sql - GRANT pem_admin, pem_agent_pool TO pem_admin1; + GRANT pem_agent_pool TO pem_admin1; __OUTPUT__ GRANT ROLE ``` -3. Grant CONNECT privileges to the pgbouncer user on the `pem` database: + ```sql + GRANT pem_agent TO pem_admin1 WITH ADMIN OPTION; + __OUTPUT__ + GRANT ROLE + ``` + +1. Grant CONNECT privileges to the pgbouncer user: ```sql GRANT CONNECT ON DATABASE pem TO pgbouncer; @@ -39,7 +68,7 @@ You must configure the PEM database server to work with PgBouncer. This example GRANT ``` -4. Grant USAGE privileges to the pgbouncer user for the `pem` schema on the `pem` database: +1. Grant USAGE privileges to the pgbouncer user for the `pem` schema: ```sql GRANT USAGE ON SCHEMA pem TO pgbouncer; @@ -47,7 +76,7 @@ You must configure the PEM database server to work with PgBouncer. This example GRANT ``` -5. Grant EXECUTE privileges to the pgbouncer user on the `pem.get_agent_pool_auth(text)` function in the `pem` database. For example: +1. Grant EXECUTE privileges to the pgbouncer user on the `pem.get_agent_pool_auth(text)` function. For example: ```sql GRANT EXECUTE ON FUNCTION pem.get_agent_pool_auth(text) TO pgbouncer; @@ -55,7 +84,7 @@ You must configure the PEM database server to work with PgBouncer. This example GRANT ``` -6. Use the `pem.create_proxy_agent_user(varchar)` function to create a user named pem_agent_user1 on the PEM database server: +1. Use the `pem.create_proxy_agent_user(varchar)` function to create a user named pem_agent_user1. This proxy user will serve connections between all Agents and PgBouncer. ```sql SELECT pem.create_proxy_agent_user('pem_agent_user1'); @@ -65,14 +94,107 @@ You must configure the PEM database server to work with PgBouncer. This example (1 row) ``` - The function creates a user with the same name and a random password and grants pem_agent and pem_agent_pool roles to the user. This approach allows pgBouncer to use a proxy user on behalf of the agent. + The function creates a user with the same name and a random password and grants pem_agent and pem_agent_pool roles to the user. This approach allows PgBouncer to use a proxy user on behalf of the agent. + +## Updating the configuration files to allow PgBouncer-PEM connections + +1. Allow the pgbouncer user to connect to the `pem` database using the SSL authentication method by adding the `hostssl pem` entry in the `pg_hba.conf` file of the PEM database server. + + In the list of rules, ensure you place the `hostssl pem` entry before any other rules assigned to the `+pem_agent` user. + + ```shell + # Allow the PEM agent proxy user (used by pgbouncer) + # to connect the to PEM server using SSL + + hostssl pem +pem_agent_pool 127.0.0.1/32 cert map=pem_agent_pool + ``` -7. Add the following entries to the start of the `pg_hba.conf` file of the PEM database server. These entries allow the pgBouncer user to connect to the `pem` database using the md5 authentication method. +1. 
Allow the PEM server to map all users involved in PgBouncer-PEM connections by adding these lines to the `$PGDATA/pg_ident.conf` user mapping file: ```shell - # Allow the PEM agent proxy user (used by - # pgbouncer) to connect the to PEM server using - # md5 + pem_agent_pool pem_agent_pool pem_agent_user1 + pem_agent_pool pem_agent_pool pem_admin1 + pem_agent_pool pem_agent_pool pgbouncer + ``` + +1. Restart the Postgres service. Replace the `` placeholder with the name of the Postgres instance systemd service name: + + ```shell + systemctl restart + ``` + +## Creating the SSL key and certificate for PgBouncer-PEM authentication + +Create a key and certificate for the `pem_agent_pool` group role. Then, move the files to the PgBouncer instance to allow authentication between the PEM database server and PgBouncer. + +This example runs EDB Postgres Advanced Server on RHEL. When setting your environment variables, choose the correct directories according to your operating system and Postgres distribution. - local pem pgbouncer,pem_admin1 md5 +1. Set the `$DATA_DIR` environment variable to your data directory: + + ```shell + export DATA_DIR=/var/lib/edb/as16/data + ``` + +
Data directories per OS and Postgres version +
+ Here are some examples of other default data directories per operating system and Postgres version. + + | Postgres version | RHEL/Rocky Linux/AlmaLinux/SLES | Debian/Ubuntu | + |---------------------------------------|---------------------------------|--------------------------| + | EDB Postgres
Advanced Server 16 | /var/lib/edb/as16/data | /var/lib/edb-as/16/main | + | EDB Postgres
Extended Server 16 | /var/lib/edb/edb-pge/16/data    | /var/lib/edb-pge/16/main |
+    | PostgreSQL 16                         | /var/lib/edb/pgsql/16/data      | /var/lib/postgresql/16/main |
+
+ +
+1. Set the `$USER_HOME` environment variable to the home directory accessible to the user:
+
+    ```shell
+    export USER_HOME=/var/lib/edb
+    ```
+
+    
User home directories per OS and Postgres version +
+ Here are some examples of other default home directories per operating system and Postgres version. + + | Postgres version | RHEL/Rocky Linux/AlmaLinux/SLES | Debian/Ubuntu | + |---------------------------------------|---------------------------------|---------------------| + | EDB Postgres
Advanced Server 16 | /var/lib/edb | /var/lib/edb-as | + | EDB Postgres
Extended Server 16 | /var/lib/pgsql | /var/lib/postgresql | + | PostgreSQL 16 | /var/lib/pgsql | /var/lib/postgresql | + +
+ +
+ +1. Create the signing key with openssl: + + ```shell + openssl genrsa -out pem_agent_pool.key 4096 + ``` + +1. Create a certificate-signing request (CSR). Replace the `-subj` attributes in `<...>` as required. Ensure the Common Name (CN) is set to the `pem_agent_pool` group role name: + + ```shell + openssl req -new -key pem_agent_pool.key -out pem_agent_pool.csr -subj '/C=/ST=/L=/O=/CN=pem_agent_pool' + ``` + +1. Use the PEM CA and key to sign the CSR: + + ``` + openssl x509 -req -days 365 -in pem_agent_pool.csr -CA $DATA_DIR/ca_certificate.crt -CAkey $DATA_DIR/ca_key.key -CAcreateserial -out pem_agent_pool.crt + ``` + +1. Move the created key and certificate to a path the `enterprisedb` user can access. + + In this example, create a folder called `~/.postgresql` in the home directory of the `enterprisedb` user and ensure it has permissions: + + ``` + mkdir -p $USER_HOME/.postgresql + mv pem_agent_pool.key pem_agent_pool.crt $USER_HOME/.postgresql + chmod 0600 $USER_HOME/.postgresql/pem_agent_pool.key + chmod 0644 $USER_HOME/.postgresql/pem_agent_pool.crt + chown enterprisedb:enterprisedb $USER_HOME/.postgresql/pem_agent_pool.* ``` diff --git a/product_docs/docs/pem/9/images/pem_database_with_pgbouncer.png b/product_docs/docs/pem/9/images/pem_database_with_pgbouncer.png deleted file mode 100755 index ea025f63969..00000000000 --- a/product_docs/docs/pem/9/images/pem_database_with_pgbouncer.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:226d52b5c8e60a4d22ca95b4951a5e70e0814f723050bde1542a2b4fc51de929 -size 197095 diff --git a/product_docs/docs/pem/9/images/pem_database_without_pgbouncer.png b/product_docs/docs/pem/9/images/pem_database_without_pgbouncer.png deleted file mode 100755 index 2eca5f6c393..00000000000 --- a/product_docs/docs/pem/9/images/pem_database_without_pgbouncer.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9c376a383bbc5a5999589d5932fa27004f6453dc3f14bf99686143743f8147fc -size 180375 diff --git a/product_docs/docs/pem/9/images/pem_with_pgbouncer.png b/product_docs/docs/pem/9/images/pem_with_pgbouncer.png new file mode 100644 index 00000000000..21c0437c063 --- /dev/null +++ b/product_docs/docs/pem/9/images/pem_with_pgbouncer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0da3b93bf4127a528f10a1c58ef48c267cf66531dad13efec2aee8c17da527b9 +size 188773 diff --git a/product_docs/docs/pem/9/images/pem_without_pgbouncer.png b/product_docs/docs/pem/9/images/pem_without_pgbouncer.png new file mode 100644 index 00000000000..18e9d3ee630 --- /dev/null +++ b/product_docs/docs/pem/9/images/pem_without_pgbouncer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37ecb7464eb33bf09494d885685a7a8f91909ce2761a434c47bd1f905605c574 +size 215177 diff --git a/product_docs/docs/pem/9/monitoring_performance/probes.mdx b/product_docs/docs/pem/9/monitoring_performance/probes.mdx index 2a8e9b3609c..ae222f17b39 100644 --- a/product_docs/docs/pem/9/monitoring_performance/probes.mdx +++ b/product_docs/docs/pem/9/monitoring_performance/probes.mdx @@ -178,18 +178,18 @@ Use the **General** tab to modify the definition of an existing probe or to spec - The **Mandatory columns** column indicates the coloumns you must configure in the probe query to ensure the required data is collected. - The **Probe examples** column provides some existing probes you can explore to better understand how probes are used in practice. 
- | Target type | Execution level | Mandatory columns | Probe examples | - |-------------|-----------------|------------------------------------------------------|----------------| - | Agent | Agent | None | cpu_usage | - | Server | Server | None | | - | Database | Database | None | | - | Schema | Database | schema_name | | - | Table | Database | schema_name, table_name | | - | Index | Database | schema_name, index_name | index_size | - | Sequence | Database | schema_name, sequence_name | | - | View | Database | schema_name, view_name | | - | Function | Database | schema_name, arg_types, function_type, function_name | | - | Extension | Extension | None | Extension | + | Target type | Execution level | Mandatory columns | Probe examples | + |-------------|-----------------|------------------------------------------------------|---------------------| + | Agent | Agent | None | cpu_usage | + | Server | Server | None | server_info | + | Database | Database | None | database_size | + | Schema | Database | schema_name | oc_extension | + | Table | Database | schema_name, table_name | table_size | + | Index | Database | schema_name, index_name | index_size | + | Sequence | Database | schema_name, sequence_name | oc_sequence | + | View | Database | schema_name, view_name | mview_size | + | Function | Database | schema_name, arg_types, function_type, function_name | function_statistics | + | Extension | Extension | None | bdr_node_summary | !!!note - The custom probes set to a database or larger target type (including schema, table, index, view, sequence, and functions) collect the information at the database level. diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/identify_images/private_registries.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/identify_images/private_registries.mdx index 87b57313330..445666507a6 100644 --- a/product_docs/docs/postgres_distributed_for_kubernetes/1/identify_images/private_registries.mdx +++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/identify_images/private_registries.mdx @@ -43,7 +43,7 @@ log in to the EDB container registry, for example, through `docker login` or a In the [repos page in EDB](https://www.enterprisedb.com/repos-downloads), is an EDB Repos 2.0 section where a repo token appears obscured. -![EDB Repo Portal](images/edb-repo-portal.png) +![EDB Repo Portal](../images/edb-repo-portal.png) Next to the repo token is a **Copy Token** button to copy the token and an eye icon for looking at the content of the token. diff --git a/product_docs/docs/tde/15/enabling_tde.mdx b/product_docs/docs/tde/15/enabling_tde.mdx index 32177b2061f..79b24445228 100644 --- a/product_docs/docs/tde/15/enabling_tde.mdx +++ b/product_docs/docs/tde/15/enabling_tde.mdx @@ -114,7 +114,8 @@ You can find out whether TDE is present on a server by querying the `data_encryp A value of 0 means TDE isn't enabled. Any nonzero value reflects the version of TDE in use. Currently, when TDE is enabled, this value is 1. 
```sql
-# select data_encryption_version from pg_control_init();
+select data_encryption_version from pg_control_init();
+__OUTPUT__
 data_encryption_version 
-------------------------
                       1
(1 row)
```
diff --git a/product_docs/docs/tpa/23/rel_notes/index.mdx b/product_docs/docs/tpa/23/rel_notes/index.mdx
index 086be0dc0a5..70411fab830 100644
--- a/product_docs/docs/tpa/23/rel_notes/index.mdx
+++ b/product_docs/docs/tpa/23/rel_notes/index.mdx
@@ -2,6 +2,7 @@
 title: Trusted Postgres Architect release notes
 navTitle: "Release notes"
 navigation:
+  - tpa_23.34.1_rel_notes
   - tpa_23.34_rel_notes
   - tpa_23.33_rel_notes
   - tpa_23.32_rel_notes
@@ -32,6 +33,7 @@ The Trusted Postgres Architect documentation describes the latest version of Tru
 
 | Version                      | Release date |
 | ---------------------------- | ------------ |
+| [23.34.1](tpa_23.34.1_rel_notes) | 09 Sep 2024 |
 | [23.34](tpa_23.34_rel_notes) | 22 Aug 2024  |
 | [23.33](tpa_23.33_rel_notes) | 24 Jun 2024  |
 | [23.32](tpa_23.32_rel_notes) | 15 May 2024  |
diff --git a/product_docs/docs/tpa/23/rel_notes/tpa_23.34.1_rel_notes.mdx b/product_docs/docs/tpa/23/rel_notes/tpa_23.34.1_rel_notes.mdx
new file mode 100644
index 00000000000..0d8e21cbce7
--- /dev/null
+++ b/product_docs/docs/tpa/23/rel_notes/tpa_23.34.1_rel_notes.mdx
@@ -0,0 +1,12 @@
+---
+title: Trusted Postgres Architect 23.34.1 release notes
+navTitle: "Version 23.34.1"
+---
+
+Released: 9 September 2024
+
+Trusted Postgres Architect 23.34.1 is a bug fix release that resolves the following issues:
+
+| Type          | Description |
+|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Bug Fix       | Fixed an issue whereby running deploy after a switchover fails for nodes with `efm-witness` role. The `upstream-primary` for EFM nodes is determined using the facts gathered from Postgres. This previously failed for nodes with `efm-witness` roles since they do not have Postgres. The task to determine upstream-primary is now run only on nodes with `primary` or `replica` roles.
| \ No newline at end of file diff --git a/tools/automation/actions/jira-sync/jira.js b/tools/automation/actions/jira-sync/jira.js index 2195b953052..e25784ff0eb 100644 --- a/tools/automation/actions/jira-sync/jira.js +++ b/tools/automation/actions/jira-sync/jira.js @@ -121,15 +121,15 @@ async function loadGHIssues(issueNumber) { return ret; } -async function loadSynchedJiraIssues() { +async function loadSynchedJiraIssues(accumulateIssues) { console.log(`Loading synched Jira issues`); - const query = `summary ~ "\\"Docs GH #\\"" order by created DESC`; + const query = `summary ~ "\\"Docs GH #\\"" order by created ASC`; try { const response = await fetch( `https://enterprisedb.atlassian.net/rest/api/3/search?jql=${encodeURIComponent( query, - )}`, + )}&startAt=${(accumulateIssues || []).length}&maxResults=100`, { method: "GET", headers: { @@ -139,7 +139,10 @@ async function loadSynchedJiraIssues() { }, ); const json = await response.json(); - return json?.issues; + accumulateIssues = [...(accumulateIssues || []), ...(json?.issues || [])]; + if (json?.total > json?.startAt + json?.maxResults) + return loadSynchedJiraIssues(accumulateIssues); + return accumulateIssues; } catch (err) { console.error(err); } diff --git a/tools/automation/actions/link-check/index.js b/tools/automation/actions/link-check/index.js index b3b97d0edfb..c4d64add6ff 100644 --- a/tools/automation/actions/link-check/index.js +++ b/tools/automation/actions/link-check/index.js @@ -16,6 +16,7 @@ import GithubSlugger from "github-slugger"; import toVfile from "to-vfile"; const { read, write } = toVfile; +const imageExts = [".png", ".svg", ".jpg", ".jpeg", ".gif"]; const docsUrl = "https://www.enterprisedb.com/docs"; // add path here to ignore link warnings const noWarnPaths = [ @@ -158,6 +159,30 @@ async function main() { await scanner.run(ast, input); } + const imageFiles = await glob( + imageExts.flatMap((ext) => [ + path.resolve(basePath, "product_docs/**/*" + ext), + path.resolve(basePath, "advocacy_docs/**/*" + ext), + ]), + ); + + for (const sourcePath of imageFiles) { + const metadata = { + canonical: fsPathToURLPath(sourcePath), + index: false, + slugs: [], + redirects: [], + source: sourcePath, + }; + allValidUrlPaths.set(metadata.canonical, metadata); + if (isVersioned(sourcePath)) { + const splitPath = metadata.canonical.split(path.posix.sep); + metadata.product = splitPath[1]; + metadata.version = splitPath[2]; + allValidUrlPaths.set(latestVersionURLPath(sourcePath), metadata); + } + } + // compile product versions const productVersions = {}; @@ -356,9 +381,16 @@ function cleanup() { const mapUrlToCanonical = (url, position) => { let test = normalizeUrl(url, metadata.canonical, metadata.index); + if ( + test.href === + docsUrl + "/edb-postgres-ai/analytics/images/level-50.png" + ) + debugger; if (!test.href.startsWith(docsUrl)) return url; if (test.href === docsUrl) return url; - if (path.posix.extname(test.pathname)) return url; + const ext = path.posix.extname(test.pathname); + const isImageUrl = imageExts.includes(ext); + if (ext && !isImageUrl) return url; metadata.linksChecked = metadata.linksChecked || 0 + 1; @@ -440,7 +472,7 @@ function cleanup() { return url; }; - visitParents(tree, ["link", "element"], (node) => { + visitParents(tree, ["link", "image", "element"], (node) => { try { if ( node.type === "element" && @@ -451,7 +483,7 @@ function cleanup() { node.properties.href, node.position, ); - else if (node.type === "link") + else if (node.type === "link" || node.type === "image") node.url = 
mapUrlToCanonical(node.url, node.position); } catch (e) { file.message(e, node.position); @@ -494,11 +526,14 @@ function fsPathToURLPath(fsPath) { // 2. strip trailing index.mdx // 3. strip trailing .mdx // 4. strip trailing / + // URL encode const docsLocations = /product_docs\/docs|advocacy_docs/; - return fsPath - .split(docsLocations)[1] - .replace(/\/index\.mdx$|\.mdx$/, "") - .replace(/\/$/, ""); + return encodeURI( + fsPath + .split(docsLocations)[1] + .replace(/\/index\.mdx$|\.mdx$/, "") + .replace(/\/$/, ""), + ); } function latestVersionURLPath(fsPath) {