diff --git a/tembo-operator/Cargo.lock b/tembo-operator/Cargo.lock index a4de24304..3dc780ff5 100644 --- a/tembo-operator/Cargo.lock +++ b/tembo-operator/Cargo.lock @@ -494,7 +494,7 @@ dependencies = [ [[package]] name = "controller" -version = "0.27.1" +version = "0.28.0" dependencies = [ "actix-web", "anyhow", diff --git a/tembo-operator/Cargo.toml b/tembo-operator/Cargo.toml index ccf151bfb..708a5b637 100644 --- a/tembo-operator/Cargo.toml +++ b/tembo-operator/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "controller" description = "Tembo Operator for Postgres" -version = "0.27.1" +version = "0.28.0" edition = "2021" default-run = "controller" license = "Apache-2.0" diff --git a/tembo-operator/src/apis/coredb_types.rs b/tembo-operator/src/apis/coredb_types.rs index 16f073c6d..77bd5d006 100644 --- a/tembo-operator/src/apis/coredb_types.rs +++ b/tembo-operator/src/apis/coredb_types.rs @@ -364,9 +364,8 @@ pub struct CoreDBSpec { #[serde(default = "defaults::default_pkglibdir_storage")] pub pkglibdirStorage: Quantity, - /// Enable the use of the Postgres Exporter deployment for metrics collection - /// - /// **Default**: true. + /// **DEPRECATED** Enable the use of the Postgres Exporter deployment for metrics collection + /// This is no longer used and will be removed in a future release. #[serde(default = "defaults::default_postgres_exporter_enabled")] pub postgresExporterEnabled: bool, @@ -379,9 +378,8 @@ pub struct CoreDBSpec { #[serde(default = "defaults::default_image")] pub image: String, - /// The postgres-exporter image you want to use for the postgres-exporter deployment. - /// - /// **Default**: quay.io/prometheuscommunity/postgres-exporter:v0.12.0 + /// **DEPRECATED** The postgres-exporter image you want to use for the postgres-exporter deployment. + /// This is no longer used and will be removed in a future release. 
#[serde(default = "defaults::default_postgres_exporter_image")] pub postgresExporterImage: String, diff --git a/tembo-operator/src/cloudnativepg/cnpg.rs b/tembo-operator/src/cloudnativepg/cnpg.rs index 1e964caf2..aaec71784 100644 --- a/tembo-operator/src/cloudnativepg/cnpg.rs +++ b/tembo-operator/src/cloudnativepg/cnpg.rs @@ -44,6 +44,7 @@ use crate::{ defaults::{default_image, default_llm_image}, errors::ValueError, is_postgres_ready, patch_cdb_status_merge, + postgres_exporter::EXPORTER_CONFIGMAP_PREFIX, psql::PsqlOutput, trunk::extensions_that_require_load, Context, RESTARTED_AT, @@ -601,6 +602,20 @@ pub fn cnpg_cluster_from_cdb( }) } + if cdb + .spec + .metrics + .as_ref() + .and_then(|m| m.queries.as_ref()) + .is_some() + { + let configmap = format!("{}{}", EXPORTER_CONFIGMAP_PREFIX, cdb.name_any()); + metrics.push(ClusterMonitoringCustomQueriesConfigMap { + key: "tembo-queries".to_string(), + name: configmap, + }) + } + Cluster { metadata: ObjectMeta { name: Some(name.clone()), diff --git a/tembo-operator/src/configmap.rs b/tembo-operator/src/configmap.rs index eb61060f6..7caece992 100644 --- a/tembo-operator/src/configmap.rs +++ b/tembo-operator/src/configmap.rs @@ -72,10 +72,13 @@ pub async fn apply_configmap( cm_name: &str, data: BTreeMap, ) -> Result<(), Error> { + let mut labels: BTreeMap = BTreeMap::new(); + labels.insert("cnpg.io/reload".to_owned(), "true".to_owned()); let cm_api: Api = Api::namespaced(client, namespace); let cm = ConfigMap { metadata: ObjectMeta { name: Some(cm_name.to_string()), + labels: Some(labels), ..Default::default() }, data: Some(data), diff --git a/tembo-operator/src/controller.rs b/tembo-operator/src/controller.rs index 446557e71..bdb737ed8 100644 --- a/tembo-operator/src/controller.rs +++ b/tembo-operator/src/controller.rs @@ -9,7 +9,6 @@ use crate::{ cnpg::{cnpg_cluster_from_cdb, reconcile_cnpg, reconcile_cnpg_scheduled_backup, reconcile_pooler}, }, config::Config, - 
deployment_postgres_exporter::reconcile_prometheus_exporter_deployment, exec::{ExecCommand, ExecOutput}, extensions::database_queries::is_not_restarting, heartbeat::reconcile_heartbeat, @@ -17,7 +16,6 @@ use crate::{ postgres_certificates::reconcile_certificates, psql::{PsqlCommand, PsqlOutput}, secret::{reconcile_postgres_role_secret, reconcile_secret}, - service::reconcile_prometheus_exporter_service, telemetry, Error, Metrics, Result, }; use k8s_openapi::{ @@ -270,22 +268,11 @@ impl CoreDB { reconcile_cnpg_scheduled_backup(self, ctx.clone()).await?; } - if self.spec.postgresExporterEnabled { - debug!("Reconciling prometheus exporter deployment"); - reconcile_prometheus_exporter_deployment(self, ctx.clone()) - .await - .map_err(|e| { - error!("Error reconciling prometheus exporter deployment: {:?}", e); - Action::requeue(Duration::from_secs(300)) - })?; - }; - - // reconcile service - debug!("Reconciling prometheus exporter service"); - reconcile_prometheus_exporter_service(self, ctx.clone()) + // Cleanup old Postgres Exporter Deployments, Service, ServiceAccount, Role and RoleBinding + crate::deployment_postgres_exporter::cleanup_postgres_exporter(self, ctx.clone()) .await .map_err(|e| { - error!("Error reconciling service: {:?}", e); + error!("Error cleaning up postgres exporter resources: {:?}", e); Action::requeue(Duration::from_secs(300)) })?; diff --git a/tembo-operator/src/defaults.rs b/tembo-operator/src/defaults.rs index 0369439b6..e6565fc01 100644 --- a/tembo-operator/src/defaults.rs +++ b/tembo-operator/src/defaults.rs @@ -30,7 +30,7 @@ pub fn default_resources() -> ResourceRequirements { } pub fn default_postgres_exporter_enabled() -> bool { - true + false } pub fn default_uid() -> i32 { diff --git a/tembo-operator/src/deployment_postgres_exporter.rs b/tembo-operator/src/deployment_postgres_exporter.rs index 86bdd3865..1bdfd9af5 100644 --- a/tembo-operator/src/deployment_postgres_exporter.rs +++ b/tembo-operator/src/deployment_postgres_exporter.rs 
@@ -1,237 +1,267 @@ -use crate::{ - apis::coredb_types::CoreDB, - defaults::default_postgres_exporter_image, - postgres_exporter::{EXPORTER_CONFIGMAP_PREFIX, EXPORTER_VOLUME, QUERIES_YAML}, - rbac::reconcile_rbac, - Context, Error, Result, -}; -use k8s_openapi::{ - api::{ - apps::v1::{Deployment, DeploymentSpec}, - core::v1::{ - ConfigMapVolumeSource, Container, ContainerPort, EnvVar, EnvVarSource, HTTPGetAction, PodSpec, - PodTemplateSpec, Probe, SecretKeySelector, SecurityContext, Volume, VolumeMount, - }, - rbac::v1::PolicyRule, - }, - apimachinery::pkg::{apis::meta::v1::LabelSelector, util::intstr::IntOrString}, -}; -use kube::{ - api::{Api, ObjectMeta, Patch, PatchParams, ResourceExt}, - Resource, -}; -use std::{collections::BTreeMap, sync::Arc}; -use tracing::instrument; +use crate::{apis::coredb_types::CoreDB, Context, Error, Result}; +use k8s_openapi::api::apps::v1::Deployment; +use kube::api::{Api, ListParams, ResourceExt}; +use std::sync::Arc; +use tracing::{debug, error}; -const PROM_CFG_DIR: &str = "/prometheus"; +// const PROM_CFG_DIR: &str = "/prometheus"; -#[instrument(skip(cdb, ctx), fields(instance_name = %cdb.name_any()))] -pub async fn reconcile_prometheus_exporter_deployment(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { +// Top level function to cleanup all postgres-exporter resources +// this includes the deployment, service and rbac +pub async fn cleanup_postgres_exporter(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { + delete_postgres_exporter_deployment(cdb, ctx.clone()).await?; + crate::service::delete_postgres_exporter_service(cdb, ctx.clone()).await?; + crate::rbac::cleanup_postgres_exporter_rbac(cdb, ctx.clone()).await?; + Ok(()) +} + +// Delete the postgres-exporter Deployment from the cluster +async fn delete_postgres_exporter_deployment(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { let client = ctx.client.clone(); - let coredb_name = cdb.metadata.name.clone().expect("should always have a name"); let ns = cdb.namespace().unwrap(); - 
let name = format!("{}-metrics", cdb.name_any()); - let mut labels: BTreeMap = BTreeMap::new(); let deployment_api: Api = Api::namespaced(client, &ns); - let oref = cdb.controller_owner_ref(&()).unwrap(); - labels.insert("app".to_owned(), "postgres-exporter".to_string()); - labels.insert("component".to_owned(), "metrics".to_string()); - labels.insert("coredb.io/name".to_owned(), cdb.name_any()); - - // Format the postgres-exporter connection URI - // Check if cnpg is enabled, if so then set the URI to the cnpg service - // Otherwise, use the old coredb service - let psql_uri: String = format!("{}-rw.{}.svc.cluster.local:5432/postgres", cdb.name_any(), ns); - - // reconcile rbac(service account, role, role binding) for the postgres-exporter - let rbac = reconcile_rbac( - cdb, - ctx.clone(), - Some("metrics"), - create_policy_rules(name.clone()).await, - ) - .await?; - - // Generate the ObjectMeta for the Deployment - let deployment_metadata = ObjectMeta { - name: Some(name.to_owned()), - namespace: Some(ns.to_owned()), - labels: Some(labels.clone()), - owner_references: Some(vec![oref]), - ..ObjectMeta::default() - }; - // 0 replicas on deployment when stopping - // 1 replica in all other cases - let replicas = match cdb.spec.stop { - true => 0, - false => 1, - }; + // Define the label selector based on your deployment labels + let label_selector = + "app=postgres-exporter,component=metrics,coredb.io/name=".to_owned() + &cdb.name_any(); + let lp = ListParams::default().labels(&label_selector); - // Generate the Probe for the Container - let readiness_probe = Probe { - http_get: Some(HTTPGetAction { - path: Some("/metrics".to_string()), - port: IntOrString::String("metrics".to_string()), - ..HTTPGetAction::default() - }), - initial_delay_seconds: Some(3), - ..Probe::default() - }; - - // Generate ContainerPort for the Container - let container_port = vec![ContainerPort { - container_port: 9187, - name: Some("metrics".to_string()), - protocol: Some("TCP".to_string()), 
- ..ContainerPort::default() - }]; - - // Generate SecurityContext for the Container - let security_context = SecurityContext { - run_as_user: Some(65534), - allow_privilege_escalation: Some(false), - ..SecurityContext::default() - }; - - // Generate EnvVar for the Container - let env_vars = vec![ - EnvVar { - name: "DATA_SOURCE_URI".to_string(), - value: Some(psql_uri.clone()), - ..EnvVar::default() - }, - EnvVar { - name: "DATA_SOURCE_USER".to_string(), - value: Some("postgres_exporter".to_string()), - ..EnvVar::default() - }, - // Set EnvVar from a secret - EnvVar { - name: "DATA_SOURCE_PASS".to_string(), - value_from: Some(EnvVarSource { - secret_key_ref: Some(SecretKeySelector { - key: "password".to_string(), - name: Some(format!("{}-exporter", coredb_name.clone())), - optional: Some(false), - }), - ..EnvVarSource::default() - }), - ..EnvVar::default() - }, - EnvVar { - name: "PG_EXPORTER_EXTEND_QUERY_PATH".to_string(), - value: Some(format!("{PROM_CFG_DIR}/{QUERIES_YAML}")), - ..EnvVar::default() - }, - ]; - - // Generate VolumeMounts for the Container - let exporter_vol_mounts = if let Some(metrics) = &cdb.spec.metrics { - if metrics.queries.is_some() { - vec![VolumeMount { - name: EXPORTER_VOLUME.to_owned(), - mount_path: PROM_CFG_DIR.to_string(), - ..VolumeMount::default() - }] - } else { - vec![] - } - } else { - vec![] - }; + // List deployments with specified labels + let deployments = deployment_api.list(&lp).await?; - // Generate Volumes for the PodSpec - let exporter_volumes = if let Some(metrics) = &cdb.spec.metrics { - if metrics.queries.is_some() { - vec![Volume { - config_map: Some(ConfigMapVolumeSource { - name: Some(format!("{}{}", EXPORTER_CONFIGMAP_PREFIX.to_owned(), coredb_name)), - ..ConfigMapVolumeSource::default() - }), - name: EXPORTER_VOLUME.to_owned(), - ..Volume::default() - }] - } else { - vec![] + // Delete the deployment + for deployment in deployments { + if let Some(deployment_name) = deployment.metadata.name { + match 
deployment_api.delete(&deployment_name, &Default::default()).await { + Ok(_) => { + debug!( + "Deleted Deployment: {}, for instance {}", + deployment_name, + cdb.name_any() + ); + } + Err(e) => { + error!( + "Error deleting Deployment: {}, for instance {}", + e, + cdb.name_any() + ); + return Err(Error::KubeError(e)); + } + } } - } else { - vec![] - }; - - // Generate the PodSpec for the PodTemplateSpec - let pod_spec = PodSpec { - containers: vec![Container { - env: Some(env_vars), - image: Some(get_exporter_image(&cdb.clone())), - name: "postgres-exporter".to_string(), - ports: Some(container_port), - readiness_probe: Some(readiness_probe), - security_context: Some(security_context), - volume_mounts: Some(exporter_vol_mounts), - ..Container::default() - }], - service_account: rbac.service_account.metadata.name.clone(), - service_account_name: rbac.service_account.metadata.name.clone(), - volumes: Some(exporter_volumes), - ..PodSpec::default() - }; - - // Generate the PodTemplateSpec for the DeploymentSpec - let pod_template_spec = PodTemplateSpec { - metadata: Some(deployment_metadata.clone()), - spec: Some(pod_spec), - }; - - // Generate the DeploymentSpec for the Deployment - let deployment_spec = DeploymentSpec { - replicas: Some(replicas), - selector: LabelSelector { - match_labels: Some(labels.clone()), - ..LabelSelector::default() - }, - template: pod_template_spec, - ..DeploymentSpec::default() - }; - - // Generate the Deployment for Prometheus Exporter - let deployment = Deployment { - metadata: deployment_metadata, - spec: Some(deployment_spec), - ..Deployment::default() - }; - - let ps = PatchParams::apply("cntrlr").force(); - let _o = deployment_api - .patch(&name, &ps, &Patch::Apply(&deployment)) - .await - .map_err(Error::KubeError)?; + } Ok(()) } -// Generate the PolicyRules for the Role -#[instrument(fields(instance_name = %name))] -async fn create_policy_rules(name: String) -> Vec { - vec![ - // This policy allows get, watch access to a secret in 
the namespace - PolicyRule { - api_groups: Some(vec!["".to_owned()]), - resource_names: Some(vec![format!("{}", name)]), - resources: Some(vec!["secrets".to_owned()]), - verbs: vec!["get".to_string(), "watch".to_string()], - ..PolicyRule::default() - }, - ] -} - -fn get_exporter_image(cdb: &CoreDB) -> String { - // Check if cdb.spec.postgresExporterImage is set - // If so, use that image; otherwise, use the default - // image from default_postgres_exporter_image() function - if cdb.spec.postgresExporterImage.is_empty() { - default_postgres_exporter_image() - } else { - cdb.spec.postgresExporterImage.clone() - } -} +// #[instrument(skip(cdb, ctx), fields(instance_name = %cdb.name_any()))] +// pub async fn reconcile_prometheus_exporter_deployment(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { +// let client = ctx.client.clone(); +// let coredb_name = cdb.metadata.name.clone().expect("should always have a name"); +// let ns = cdb.namespace().unwrap(); +// let name = format!("{}-metrics", cdb.name_any()); +// let mut labels: BTreeMap = BTreeMap::new(); +// let deployment_api: Api = Api::namespaced(client, &ns); +// let oref = cdb.controller_owner_ref(&()).unwrap(); +// labels.insert("app".to_owned(), "postgres-exporter".to_string()); +// labels.insert("component".to_owned(), "metrics".to_string()); +// labels.insert("coredb.io/name".to_owned(), cdb.name_any()); +// +// // Format the postgres-exporter connection URI +// // Check if cnpg is enabled, if so then set the URI to the cnpg service +// // Otherwise, use the old coredb service +// let psql_uri: String = format!("{}-rw.{}.svc.cluster.local:5432/postgres", cdb.name_any(), ns); +// +// // reconcile rbac(service account, role, role binding) for the postgres-exporter +// let rbac = reconcile_rbac( +// cdb, +// ctx.clone(), +// Some("metrics"), +// create_policy_rules(name.clone()).await, +// ) +// .await?; +// +// // Generate the ObjectMeta for the Deployment +// let deployment_metadata = ObjectMeta { +// name: 
Some(name.to_owned()), +// namespace: Some(ns.to_owned()), +// labels: Some(labels.clone()), +// owner_references: Some(vec![oref]), +// ..ObjectMeta::default() +// }; +// +// // 0 replicas on deployment when stopping +// // 1 replica in all other cases +// let replicas = match cdb.spec.stop { +// true => 0, +// false => 1, +// }; +// +// // Generate the Probe for the Container +// let readiness_probe = Probe { +// http_get: Some(HTTPGetAction { +// path: Some("/metrics".to_string()), +// port: IntOrString::String("metrics".to_string()), +// ..HTTPGetAction::default() +// }), +// initial_delay_seconds: Some(3), +// ..Probe::default() +// }; +// +// // Generate ContainerPort for the Container +// let container_port = vec![ContainerPort { +// container_port: 9187, +// name: Some("metrics".to_string()), +// protocol: Some("TCP".to_string()), +// ..ContainerPort::default() +// }]; +// +// // Generate SecurityContext for the Container +// let security_context = SecurityContext { +// run_as_user: Some(65534), +// allow_privilege_escalation: Some(false), +// ..SecurityContext::default() +// }; +// +// // Generate EnvVar for the Container +// let env_vars = vec![ +// EnvVar { +// name: "DATA_SOURCE_URI".to_string(), +// value: Some(psql_uri.clone()), +// ..EnvVar::default() +// }, +// EnvVar { +// name: "DATA_SOURCE_USER".to_string(), +// value: Some("postgres_exporter".to_string()), +// ..EnvVar::default() +// }, +// // Set EnvVar from a secret +// EnvVar { +// name: "DATA_SOURCE_PASS".to_string(), +// value_from: Some(EnvVarSource { +// secret_key_ref: Some(SecretKeySelector { +// key: "password".to_string(), +// name: Some(format!("{}-exporter", coredb_name.clone())), +// optional: Some(false), +// }), +// ..EnvVarSource::default() +// }), +// ..EnvVar::default() +// }, +// EnvVar { +// name: "PG_EXPORTER_EXTEND_QUERY_PATH".to_string(), +// value: Some(format!("{PROM_CFG_DIR}/{QUERIES_YAML}")), +// ..EnvVar::default() +// }, +// ]; +// +// // Generate VolumeMounts for 
the Container +// let exporter_vol_mounts = if let Some(metrics) = &cdb.spec.metrics { +// if metrics.queries.is_some() { +// vec![VolumeMount { +// name: EXPORTER_VOLUME.to_owned(), +// mount_path: PROM_CFG_DIR.to_string(), +// ..VolumeMount::default() +// }] +// } else { +// vec![] +// } +// } else { +// vec![] +// }; +// +// // Generate Volumes for the PodSpec +// let exporter_volumes = if let Some(metrics) = &cdb.spec.metrics { +// if metrics.queries.is_some() { +// vec![Volume { +// config_map: Some(ConfigMapVolumeSource { +// name: Some(format!("{}{}", EXPORTER_CONFIGMAP_PREFIX.to_owned(), coredb_name)), +// ..ConfigMapVolumeSource::default() +// }), +// name: EXPORTER_VOLUME.to_owned(), +// ..Volume::default() +// }] +// } else { +// vec![] +// } +// } else { +// vec![] +// }; +// +// // Generate the PodSpec for the PodTemplateSpec +// let pod_spec = PodSpec { +// containers: vec![Container { +// env: Some(env_vars), +// image: Some(get_exporter_image(&cdb.clone())), +// name: "postgres-exporter".to_string(), +// ports: Some(container_port), +// readiness_probe: Some(readiness_probe), +// security_context: Some(security_context), +// volume_mounts: Some(exporter_vol_mounts), +// ..Container::default() +// }], +// service_account: rbac.service_account.metadata.name.clone(), +// service_account_name: rbac.service_account.metadata.name.clone(), +// volumes: Some(exporter_volumes), +// ..PodSpec::default() +// }; +// +// // Generate the PodTemplateSpec for the DeploymentSpec +// let pod_template_spec = PodTemplateSpec { +// metadata: Some(deployment_metadata.clone()), +// spec: Some(pod_spec), +// }; +// +// // Generate the DeploymentSpec for the Deployment +// let deployment_spec = DeploymentSpec { +// replicas: Some(replicas), +// selector: LabelSelector { +// match_labels: Some(labels.clone()), +// ..LabelSelector::default() +// }, +// template: pod_template_spec, +// ..DeploymentSpec::default() +// }; +// +// // Generate the Deployment for Prometheus 
Exporter +// let deployment = Deployment { +// metadata: deployment_metadata, +// spec: Some(deployment_spec), +// ..Deployment::default() +// }; +// +// let ps = PatchParams::apply("cntrlr").force(); +// let _o = deployment_api +// .patch(&name, &ps, &Patch::Apply(&deployment)) +// .await +// .map_err(Error::KubeError)?; +// +// Ok(()) +// } +// +// // Generate the PolicyRules for the Role +// #[instrument(fields(instance_name = %name))] +// async fn create_policy_rules(name: String) -> Vec { +// vec![ +// // This policy allows get, watch access to a secret in the namespace +// PolicyRule { +// api_groups: Some(vec!["".to_owned()]), +// resource_names: Some(vec![format!("{}", name)]), +// resources: Some(vec!["secrets".to_owned()]), +// verbs: vec!["get".to_string(), "watch".to_string()], +// ..PolicyRule::default() +// }, +// ] +// } +// +// fn get_exporter_image(cdb: &CoreDB) -> String { +// // Check if cdb.spec.postgresExporterImage is set +// // If so, use that image; otherwise, use the default +// // image from default_postgres_exporter_image() function +// if cdb.spec.postgresExporterImage.is_empty() { +// default_postgres_exporter_image() +// } else { +// cdb.spec.postgresExporterImage.clone() +// } +// } diff --git a/tembo-operator/src/lib.rs b/tembo-operator/src/lib.rs index 4b718a8bb..c5712770c 100644 --- a/tembo-operator/src/lib.rs +++ b/tembo-operator/src/lib.rs @@ -21,7 +21,8 @@ pub mod errors; pub mod cloudnativepg; mod deployment_postgres_exporter; -#[cfg(test)] pub mod fixtures; +#[cfg(test)] +pub mod fixtures; pub mod heartbeat; pub mod ingress; pub mod traefik; diff --git a/tembo-operator/src/postgres_exporter.rs b/tembo-operator/src/postgres_exporter.rs index ad4af1d7b..69d8af8dc 100644 --- a/tembo-operator/src/postgres_exporter.rs +++ b/tembo-operator/src/postgres_exporter.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use tracing::debug; -pub const QUERIES_YAML: &str = "queries.yaml"; +pub const 
QUERIES: &str = "tembo-queries"; pub const EXPORTER_VOLUME: &str = "postgres-exporter"; pub const EXPORTER_CONFIGMAP_PREFIX: &str = "metrics-"; @@ -81,10 +81,59 @@ pub struct Metrics { pub metrics: BTreeMap, } +/// **Example**: This example exposes specific metrics from a query to a +/// [pgmq](https://github.com/tembo-io/pgmq) queue enabled database. +/// +/// ```yaml +/// metrics: +/// enabled: true +/// image: quay.io/prometheuscommunity/postgres-exporter:v0.12.0 +/// queries: +/// pgmq: +/// query: select queue_name, queue_length, oldest_msg_age_sec, newest_msg_age_sec, total_messages from pgmq.metrics_all() +/// primary: true +/// metrics: +/// - queue_name: +/// description: Name of the queue +/// usage: LABEL +/// - queue_length: +/// description: Number of messages in the queue +/// usage: GAUGE +/// - oldest_msg_age_sec: +/// description: Age of the oldest message in the queue, in seconds. +/// usage: GAUGE +/// - newest_msg_age_sec: +/// description: Age of the newest message in the queue, in seconds. +/// usage: GAUGE +/// - total_messages: +/// description: Total number of messages that have passed into the queue. +/// usage: GAUGE +/// ``` #[derive(Clone, Debug, JsonSchema, PartialEq, Serialize, Deserialize)] pub struct QueryItem { + /// the SQL query to run on the target database to generate the metrics pub query: String, + + // We need to support this at some point going forward since master + // if now deprecated. 
+ // whether to run the query only on the primary instance + //pub primary: Option, + + // same as primary (for compatibility with the Prometheus PostgreSQL + // exporter's syntax - **deprecated**) + /// whether to run the query only on the master instance + /// See [https://cloudnative-pg.io/documentation/1.20/monitoring/#structure-of-a-user-defined-metric](https://cloudnative-pg.io/documentation/1.20/monitoring/#structure-of-a-user-defined-metric) pub master: bool, + + /// a list of databases to run the query against, or a shell-like pattern to + /// enable auto discovery. Overwrites the default database if provided. + pub target_database: Option, + + /// the name of the column returned by the query + /// + /// usage: one of the values described below + /// description: the metric's description + /// metrics_mapping: the optional column mapping when usage is set to MAPPEDMETRIC pub metrics: Vec, } @@ -141,7 +190,7 @@ pub async fn reconcile_prom_configmap(cdb: &CoreDB, client: Client, ns: &str) -> match cdb.spec.metrics.clone().and_then(|m| m.queries) { Some(queries) => { let qdata = serde_yaml::to_string(&queries).unwrap(); - let d: BTreeMap = BTreeMap::from([(QUERIES_YAML.to_string(), qdata)]); + let d: BTreeMap = BTreeMap::from([(QUERIES.to_string(), qdata)]); apply_configmap( client.clone(), ns, diff --git a/tembo-operator/src/rbac.rs b/tembo-operator/src/rbac.rs index 36a10c535..2e17192a5 100644 --- a/tembo-operator/src/rbac.rs +++ b/tembo-operator/src/rbac.rs @@ -1,192 +1,310 @@ use crate::{apis::coredb_types::CoreDB, Context, Error}; -use k8s_openapi::{ - api::{ - core::v1::ServiceAccount, - rbac::v1::{PolicyRule, Role, RoleBinding, RoleRef, Subject}, - }, - apimachinery::pkg::apis::meta::v1::ObjectMeta, +use k8s_openapi::api::{ + core::v1::ServiceAccount, + rbac::v1::{Role, RoleBinding}, }; -use kube::{ - api::{Patch, PatchParams}, - Api, ResourceExt, -}; -use std::{collections::BTreeMap, sync::Arc, vec}; +use kube::{api::ListParams, Api, ResourceExt}; 
+use std::sync::Arc; +use tracing::{debug, error}; -pub struct Rbac { - pub service_account: ServiceAccount, - pub role: Role, - pub rolebinding: RoleBinding, -} +// pub struct Rbac { +// pub service_account: ServiceAccount, +// pub role: Role, +// pub rolebinding: RoleBinding, +// } -// reconcile kubernetes rbac resources -pub async fn reconcile_rbac( - cdb: &CoreDB, - ctx: Arc, - suffix: Option<&str>, - policy_rules: Vec, -) -> Result { - // reconcile service account - let service_account = reconcile_service_account(cdb, ctx.clone(), suffix).await?; - let sa = service_account.clone(); - // reconcile role - let role = reconcile_role(cdb, ctx.clone(), suffix, policy_rules).await?; - let rle = role.clone(); - // reconcile role binding - let role_binding = reconcile_role_binding(cdb, ctx.clone(), service_account, rle.clone(), suffix).await?; - - Ok(Rbac { - service_account: sa, - role: rle, - rolebinding: role_binding, - }) +// Delete the postgres-exporter RBAC objects from the cluster +pub async fn cleanup_postgres_exporter_rbac(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { + delete_postgres_exporter_service_account(cdb, ctx.clone()).await?; + delete_postgres_exporter_role(cdb, ctx.clone()).await?; + delete_postgres_exporter_role_binding(cdb, ctx.clone()).await?; + Ok(()) } -// reconcile a kubernetes service account -async fn reconcile_service_account( - cdb: &CoreDB, - ctx: Arc, - suffix: Option<&str>, -) -> Result { - let suffix = suffix.map_or("sa".to_owned(), |s| { - if s.is_empty() { - "sa".to_owned() - } else { - s.to_owned() - } - }); +// Delete the postgres-exporter ServiceAccount from the cluster +async fn delete_postgres_exporter_service_account(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { let client = ctx.client.clone(); let ns = cdb.namespace().unwrap(); - let name = format!("{}-{}", cdb.name_any(), suffix); - let sa_api: Api = Api::namespaced(client.clone(), &ns); - - let mut labels: BTreeMap = BTreeMap::new(); - labels.insert("app".to_owned(), 
"coredb".to_string()); - labels.insert("coredb.io/name".to_owned(), cdb.name_any()); - - let mut sa_metadata = ObjectMeta { - name: Some(name.to_owned()), - namespace: Some(ns.to_owned()), - labels: Some(labels.clone()), - ..ObjectMeta::default() - }; - - if let Some(ref template_metadata) = cdb.spec.serviceAccountTemplate.metadata { - if let Some(ref annotations) = template_metadata.annotations { - sa_metadata.annotations = Some(annotations.clone()); - } - } + let service_account_api: Api = Api::namespaced(client, &ns); + + // Define the label selector based on your service account labels + let label_selector = "app=coredb,coredb.io/name=".to_owned() + &cdb.name_any(); + let lp = ListParams::default().labels(&label_selector); - let sa = ServiceAccount { - metadata: sa_metadata, - ..ServiceAccount::default() - }; + // List service accounts with specified labels + let service_accounts = service_account_api.list(&lp).await?; - let ps = PatchParams::apply("cntrlr").force(); - let _o = sa_api - .patch(&name, &ps, &Patch::Apply(&sa)) - .await - .map_err(Error::KubeError)?; + // Delete the service account + for service_account in service_accounts { + if let Some(service_account_name) = service_account.metadata.name { + match service_account_api + .delete(&service_account_name, &Default::default()) + .await + { + Ok(_) => { + debug!( + "Deleted ServiceAccount: {}, for instance {}", + service_account_name, + &cdb.name_any() + ); + } + Err(e) => { + error!( + "Error deleting ServiceAccount: {}, for instance {}", + e, + &cdb.name_any() + ); + return Err(Error::KubeError(e)); + } + } + } + } - Ok(sa) + Ok(()) } -async fn reconcile_role( - cdb: &CoreDB, - ctx: Arc, - suffix: Option<&str>, - policy_rules: Vec, -) -> Result { - let suffix = suffix.map_or("role".to_owned(), |s| { - if s.is_empty() { - "role".to_owned() - } else { - s.to_owned() - } - }); +// Delete the postgres-exporter Role from the cluster +async fn delete_postgres_exporter_role(cdb: &CoreDB, ctx: Arc) -> 
Result<(), Error> { let client = ctx.client.clone(); let ns = cdb.namespace().unwrap(); - let name = format!("{}-{}", cdb.name_any(), suffix); - let role_api: Api = Api::namespaced(client.clone(), &ns); - - let mut labels: BTreeMap = BTreeMap::new(); - labels.insert("app".to_owned(), "coredb".to_string()); - labels.insert("coredb.io/name".to_owned(), cdb.name_any()); - - let role = Role { - metadata: ObjectMeta { - name: Some(name.to_owned()), - namespace: Some(ns.to_owned()), - labels: Some(labels.clone()), - ..ObjectMeta::default() - }, - rules: Some(policy_rules.to_vec()), - }; - - let ps = PatchParams::apply("cntrlr").force(); - let _o = role_api - .patch(&name, &ps, &Patch::Apply(&role)) - .await - .map_err(Error::KubeError)?; - - Ok(role) -} + let role_api: Api = Api::namespaced(client, &ns); + + // Define the label selector based on your role labels + let label_selector = "app=coredb,coredb.io/name=".to_owned() + &cdb.name_any(); + let lp = ListParams::default().labels(&label_selector); + + // List roles with specified labels + let roles = role_api.list(&lp).await?; -async fn reconcile_role_binding( - cdb: &CoreDB, - ctx: Arc, - sa: ServiceAccount, - role: Role, - suffix: Option<&str>, -) -> Result { - let suffix = suffix.map_or("role-binding".to_owned(), |s| { - if s.is_empty() { - "role-binding".to_owned() - } else { - s.to_owned() + // Delete the role + for role in roles { + if let Some(role_name) = role.metadata.name { + match role_api.delete(&role_name, &Default::default()).await { + Ok(_) => { + debug!("Deleted Role: {} for instance {}", role_name, &cdb.name_any()); + } + Err(e) => { + error!("Error deleting Role: {}, for instance {}", e, &cdb.name_any()); + return Err(Error::KubeError(e)); + } + } } - }); + } + + Ok(()) +} + +// Delete the postgres-exporter RoleBinding from the cluster +async fn delete_postgres_exporter_role_binding(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { let client = ctx.client.clone(); let ns = cdb.namespace().unwrap(); - let 
name = format!("{}-{}", cdb.name_any(), suffix); - let role_binding_api: Api = Api::namespaced(client.clone(), &ns); - let sa_name = sa.name_any(); - let role_name = role.name_any(); - - let mut labels: BTreeMap = BTreeMap::new(); - labels.insert("app".to_owned(), "coredb".to_string()); - labels.insert("coredb.io/name".to_owned(), cdb.name_any()); - - let role_ref = RoleRef { - api_group: "rbac.authorization.k8s.io".to_string(), - kind: "Role".to_string(), - name: role_name.to_string(), - }; - - let subject = Subject { - kind: "ServiceAccount".to_string(), - name: sa_name.to_string(), - namespace: Some(ns.to_owned()), - ..Subject::default() - }; - - let metadata = ObjectMeta { - name: Some(name.to_owned()), - namespace: Some(ns.to_owned()), - labels: Some(labels.clone()), - ..ObjectMeta::default() - }; - - let rb = RoleBinding { - metadata, - role_ref, - subjects: Some(vec![subject]), - }; - - let ps = PatchParams::apply("cntrlr").force(); - let _o = role_binding_api - .patch(&name, &ps, &Patch::Apply(&rb)) - .await - .map_err(Error::KubeError)?; - - Ok(rb) + let role_binding_api: Api = Api::namespaced(client, &ns); + + // Define the label selector based on your role binding labels + let label_selector = "app=coredb,coredb.io/name=".to_owned() + &cdb.name_any(); + let lp = ListParams::default().labels(&label_selector); + + // List role bindings with specified labels + let role_bindings = role_binding_api.list(&lp).await?; + + // Delete the role binding + for role_binding in role_bindings { + if let Some(role_binding_name) = role_binding.metadata.name { + match role_binding_api + .delete(&role_binding_name, &Default::default()) + .await + { + Ok(_) => { + debug!( + "Deleted RoleBinding: {}, for instance {}", + role_binding_name, + &cdb.name_any() + ); + } + Err(e) => { + error!( + "Error deleting RoleBinding: {}, for instance {}", + e, + &cdb.name_any() + ); + return Err(Error::KubeError(e)); + } + } + } + } + + Ok(()) } + +// // reconcile kubernetes rbac resources 
+// pub async fn reconcile_rbac( +// cdb: &CoreDB, +// ctx: Arc, +// suffix: Option<&str>, +// policy_rules: Vec, +// ) -> Result { +// // reconcile service account +// let service_account = reconcile_service_account(cdb, ctx.clone(), suffix).await?; +// let sa = service_account.clone(); +// // reconcile role +// let role = reconcile_role(cdb, ctx.clone(), suffix, policy_rules).await?; +// let rle = role.clone(); +// // reconcile role binding +// let role_binding = reconcile_role_binding(cdb, ctx.clone(), service_account, rle.clone(), suffix).await?; +// +// Ok(Rbac { +// service_account: sa, +// role: rle, +// rolebinding: role_binding, +// }) +// } +// +// // reconcile a kubernetes service account +// async fn reconcile_service_account( +// cdb: &CoreDB, +// ctx: Arc, +// suffix: Option<&str>, +// ) -> Result { +// let suffix = suffix.map_or("sa".to_owned(), |s| { +// if s.is_empty() { +// "sa".to_owned() +// } else { +// s.to_owned() +// } +// }); +// let client = ctx.client.clone(); +// let ns = cdb.namespace().unwrap(); +// let name = format!("{}-{}", cdb.name_any(), suffix); +// let sa_api: Api = Api::namespaced(client.clone(), &ns); +// +// let mut labels: BTreeMap = BTreeMap::new(); +// labels.insert("app".to_owned(), "coredb".to_string()); +// labels.insert("coredb.io/name".to_owned(), cdb.name_any()); +// +// let mut sa_metadata = ObjectMeta { +// name: Some(name.to_owned()), +// namespace: Some(ns.to_owned()), +// labels: Some(labels.clone()), +// ..ObjectMeta::default() +// }; +// +// if let Some(ref template_metadata) = cdb.spec.serviceAccountTemplate.metadata { +// if let Some(ref annotations) = template_metadata.annotations { +// sa_metadata.annotations = Some(annotations.clone()); +// } +// } +// +// let sa = ServiceAccount { +// metadata: sa_metadata, +// ..ServiceAccount::default() +// }; +// +// let ps = PatchParams::apply("cntrlr").force(); +// let _o = sa_api +// .patch(&name, &ps, &Patch::Apply(&sa)) +// .await +// .map_err(Error::KubeError)?; 
+// +// Ok(sa) +// } +// +// async fn reconcile_role( +// cdb: &CoreDB, +// ctx: Arc, +// suffix: Option<&str>, +// policy_rules: Vec, +// ) -> Result { +// let suffix = suffix.map_or("role".to_owned(), |s| { +// if s.is_empty() { +// "role".to_owned() +// } else { +// s.to_owned() +// } +// }); +// let client = ctx.client.clone(); +// let ns = cdb.namespace().unwrap(); +// let name = format!("{}-{}", cdb.name_any(), suffix); +// let role_api: Api = Api::namespaced(client.clone(), &ns); +// +// let mut labels: BTreeMap = BTreeMap::new(); +// labels.insert("app".to_owned(), "coredb".to_string()); +// labels.insert("coredb.io/name".to_owned(), cdb.name_any()); +// +// let role = Role { +// metadata: ObjectMeta { +// name: Some(name.to_owned()), +// namespace: Some(ns.to_owned()), +// labels: Some(labels.clone()), +// ..ObjectMeta::default() +// }, +// rules: Some(policy_rules.to_vec()), +// }; +// +// let ps = PatchParams::apply("cntrlr").force(); +// let _o = role_api +// .patch(&name, &ps, &Patch::Apply(&role)) +// .await +// .map_err(Error::KubeError)?; +// +// Ok(role) +// } +// +// async fn reconcile_role_binding( +// cdb: &CoreDB, +// ctx: Arc, +// sa: ServiceAccount, +// role: Role, +// suffix: Option<&str>, +// ) -> Result { +// let suffix = suffix.map_or("role-binding".to_owned(), |s| { +// if s.is_empty() { +// "role-binding".to_owned() +// } else { +// s.to_owned() +// } +// }); +// let client = ctx.client.clone(); +// let ns = cdb.namespace().unwrap(); +// let name = format!("{}-{}", cdb.name_any(), suffix); +// let role_binding_api: Api = Api::namespaced(client.clone(), &ns); +// let sa_name = sa.name_any(); +// let role_name = role.name_any(); +// +// let mut labels: BTreeMap = BTreeMap::new(); +// labels.insert("app".to_owned(), "coredb".to_string()); +// labels.insert("coredb.io/name".to_owned(), cdb.name_any()); +// +// let role_ref = RoleRef { +// api_group: "rbac.authorization.k8s.io".to_string(), +// kind: "Role".to_string(), +// name: 
role_name.to_string(), +// }; +// +// let subject = Subject { +// kind: "ServiceAccount".to_string(), +// name: sa_name.to_string(), +// namespace: Some(ns.to_owned()), +// ..Subject::default() +// }; +// +// let metadata = ObjectMeta { +// name: Some(name.to_owned()), +// namespace: Some(ns.to_owned()), +// labels: Some(labels.clone()), +// ..ObjectMeta::default() +// }; +// +// let rb = RoleBinding { +// metadata, +// role_ref, +// subjects: Some(vec![subject]), +// }; +// +// let ps = PatchParams::apply("cntrlr").force(); +// let _o = role_binding_api +// .patch(&name, &ps, &Patch::Apply(&rb)) +// .await +// .map_err(Error::KubeError)?; +// +// Ok(rb) +// } diff --git a/tembo-operator/src/service.rs b/tembo-operator/src/service.rs index ef3945adf..460209113 100644 --- a/tembo-operator/src/service.rs +++ b/tembo-operator/src/service.rs @@ -1,63 +1,95 @@ use crate::{apis::coredb_types::CoreDB, Context, Error}; -use k8s_openapi::{ - api::core::v1::{Service, ServicePort, ServiceSpec}, - apimachinery::pkg::{apis::meta::v1::ObjectMeta, util::intstr::IntOrString}, -}; -use kube::{ - api::{Patch, PatchParams}, - Api, Resource, ResourceExt, -}; -use std::{collections::BTreeMap, sync::Arc}; -use tracing::instrument; +use k8s_openapi::api::core::v1::Service; +use kube::{api::ListParams, Api, ResourceExt}; +use std::sync::Arc; +use tracing::{debug, error}; -#[instrument(skip(cdb, ctx), fields(instance_name = %cdb.name_any()))] -pub async fn reconcile_prometheus_exporter_service(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { +// Delete the postgres-exporter service from the cluster +pub async fn delete_postgres_exporter_service(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { let client = ctx.client.clone(); let ns = cdb.namespace().unwrap(); - let name = cdb.name_any() + "-metrics"; - let svc_api: Api = Api::namespaced(client, &ns); - let oref = cdb.controller_owner_ref(&()).unwrap(); + let service_api: Api = Api::namespaced(client, &ns); - if 
!(cdb.spec.postgresExporterEnabled) { - // check if service exists and delete it - let _o = svc_api.delete(&name, &Default::default()).await; - return Ok(()); - } - - let mut selector_labels: BTreeMap = BTreeMap::new(); - selector_labels.insert("app".to_owned(), "postgres-exporter".to_string()); - selector_labels.insert("coredb.io/name".to_owned(), cdb.name_any()); - selector_labels.insert("component".to_owned(), "metrics".to_string()); - - let mut labels = selector_labels.clone(); - labels.insert("component".to_owned(), "metrics".to_owned()); + // Define the label selector based on your service labels + let label_selector = + "app=postgres-exporter,component=metrics,coredb.io/name=".to_owned() + &cdb.name_any(); + let lp = ListParams::default().labels(&label_selector); - let metrics_svc: Service = Service { - metadata: ObjectMeta { - name: Some(name.to_owned()), - namespace: Some(ns.to_owned()), - labels: Some(labels), - owner_references: Some(vec![oref]), - ..ObjectMeta::default() - }, - spec: Some(ServiceSpec { - ports: Some(vec![ServicePort { - port: 80, - name: Some("metrics".to_string()), - target_port: Some(IntOrString::String("metrics".to_string())), - ..ServicePort::default() - }]), - selector: Some(selector_labels), - ..ServiceSpec::default() - }), - ..Service::default() - }; + // List services with specified labels + let services = service_api.list(&lp).await?; - let ps = PatchParams::apply("cntrlr").force(); - let _o = svc_api - .patch(&name, &ps, &Patch::Apply(&metrics_svc)) - .await - .map_err(Error::KubeError)?; + // Delete the service + for service in services { + if let Some(service_name) = service.metadata.name { + match service_api.delete(&service_name, &Default::default()).await { + Ok(_) => { + debug!( + "Deleted Service: {}, for instance {}", + service_name, + cdb.name_any() + ); + } + Err(e) => { + error!("Error deleting Service: {}, for instance {}", e, cdb.name_any()); + return Err(Error::KubeError(e)); + } + } + } else { + println!("Found 
a service without a name, skipping..."); + } + } Ok(()) } + +// #[instrument(skip(cdb, ctx), fields(instance_name = %cdb.name_any()))] +// pub async fn reconcile_prometheus_exporter_service(cdb: &CoreDB, ctx: Arc) -> Result<(), Error> { +// let client = ctx.client.clone(); +// let ns = cdb.namespace().unwrap(); +// let name = cdb.name_any() + "-metrics"; +// let svc_api: Api = Api::namespaced(client, &ns); +// let oref = cdb.controller_owner_ref(&()).unwrap(); +// +// if !(cdb.spec.postgresExporterEnabled) { +// // check if service exists and delete it +// let _o = svc_api.delete(&name, &Default::default()).await; +// return Ok(()); +// } +// +// let mut selector_labels: BTreeMap = BTreeMap::new(); +// selector_labels.insert("app".to_owned(), "postgres-exporter".to_string()); +// selector_labels.insert("coredb.io/name".to_owned(), cdb.name_any()); +// selector_labels.insert("component".to_owned(), "metrics".to_string()); +// +// let mut labels = selector_labels.clone(); +// labels.insert("component".to_owned(), "metrics".to_owned()); +// +// let metrics_svc: Service = Service { +// metadata: ObjectMeta { +// name: Some(name.to_owned()), +// namespace: Some(ns.to_owned()), +// labels: Some(labels), +// owner_references: Some(vec![oref]), +// ..ObjectMeta::default() +// }, +// spec: Some(ServiceSpec { +// ports: Some(vec![ServicePort { +// port: 80, +// name: Some("metrics".to_string()), +// target_port: Some(IntOrString::String("metrics".to_string())), +// ..ServicePort::default() +// }]), +// selector: Some(selector_labels), +// ..ServiceSpec::default() +// }), +// ..Service::default() +// }; +// +// let ps = PatchParams::apply("cntrlr").force(); +// let _o = svc_api +// .patch(&name, &ps, &Patch::Apply(&metrics_svc)) +// .await +// .map_err(Error::KubeError)?; +// +// Ok(()) +// } diff --git a/tembo-operator/testdata/prometheus-stack.yaml b/tembo-operator/testdata/prometheus-stack.yaml index d78b9bb80..a7c0da6c4 100644 --- 
a/tembo-operator/testdata/prometheus-stack.yaml +++ b/tembo-operator/testdata/prometheus-stack.yaml @@ -18,7 +18,37 @@ kubeProxy: enabled: false nodeExporter: enabled: false +kubeControllerManager: + enabled: false +defaultRules: + create: true + rules: + alertmanager: false + etcd: false + configReloaders: false + general: false + k8s: true + kubeApiserver: false + kubeApiserverAvailability: false + kubeApiserverSlos: false + kubelet: true + kubeProxy: false + kubePrometheusGeneral: false + kubePrometheusNodeRecording: false + kubernetesApps: false + kubernetesResources: false + kubernetesStorage: false + kubernetesSystem: false + kubeScheduler: false + kubeStateMetrics: false + network: false + node: true + nodeExporterAlerting: false + nodeExporterRecording: true + prometheus: false + prometheusOperator: false prometheusOperator: + logLevel: debug resources: limits: cpu: 100m @@ -34,3 +64,8 @@ prometheus: limits: cpu: 1 memory: 1024Mi + prometheusSpec: + podMonitorSelectorNilUsesHelmValues: false + probeSelectorNilUsesHelmValues: false + ruleSelectorNilUsesHelmValues: false + serviceMonitorSelectorNilUsesHelmValues: false diff --git a/tembo-operator/yaml/sample-message-queue.yaml b/tembo-operator/yaml/sample-message-queue.yaml index 04b7aadee..79e25ef3d 100644 --- a/tembo-operator/yaml/sample-message-queue.yaml +++ b/tembo-operator/yaml/sample-message-queue.yaml @@ -36,7 +36,7 @@ spec: value: all trunk_installs: - name: pgmq - version: 0.32.1 + version: 1.1.0 - name: pg_partman version: 4.7.3 extensions: @@ -55,7 +55,7 @@ spec: image: quay.io/prometheuscommunity/postgres-exporter:v0.12.0 queries: pgmq: - query: select queue_name, queue_length, oldest_msg_age_sec, newest_msg_age_sec,total_messages from public.pgmq_metrics_all() + query: select queue_name, queue_length, oldest_msg_age_sec, newest_msg_age_sec, total_messages from public.pgmq_metrics_all() master: true metrics: - queue_name: