diff --git a/grafana-formula/grafana/files/containers/grafana.container b/grafana-formula/grafana/files/containers/grafana.container new file mode 100644 index 0000000..0d37e71 --- /dev/null +++ b/grafana-formula/grafana/files/containers/grafana.container @@ -0,0 +1,13 @@ +[Unit] +Description=Grafana Container + +[Container] +Label=app=grafana +ContainerName=grafana +Image=registry.opensuse.org/devel/bci/sle-15-sp6/containerfile/suse/grafana:9.5.8 +Volume=/etc/grafana:/etc/grafana:ro +Volume=grafana.volume:/var/lib/grafana +PublishPort=3000:3000 + +[Install] +WantedBy=multi-user.target default.target diff --git a/grafana-formula/grafana/files/containers/grafana.volume b/grafana-formula/grafana/files/containers/grafana.volume new file mode 100644 index 0000000..b1f5aa2 --- /dev/null +++ b/grafana-formula/grafana/files/containers/grafana.volume @@ -0,0 +1,5 @@ +[Unit] +Description=Grafana Container Volume + +[Volume] +Label=app=grafana diff --git a/grafana-formula/grafana/files/datasources.yml b/grafana-formula/grafana/files/datasources.yml index e74724d..64c5967 100644 --- a/grafana-formula/grafana/files/datasources.yml +++ b/grafana-formula/grafana/files/datasources.yml @@ -12,7 +12,7 @@ datasources: - name: {{ name }} type: prometheus access: proxy - url: {{ datasource.url }} + url: {{ datasource.url|replace('localhost', grains['fqdn']) }} basicAuth: {{ basic_auth_enabled }} isDefault: {{ loop.first }} editable: true diff --git a/grafana-formula/grafana/init.sls b/grafana-formula/grafana/init.sls index 3f2c0a7..81bee71 100644 --- a/grafana-formula/grafana/init.sls +++ b/grafana-formula/grafana/init.sls @@ -1,5 +1,5 @@ # check for supported os version -{%- set supported_vers = ['42.3', '12.3', '12.4', '12.5', '15.0', '15.1', '15.2', '15.3', '15.4', '15.5'] %} +{%- set supported_vers = ['42.3', '12.3', '12.4', '12.5', '15.0', '15.1', '15.2', '15.3', '15.4', '15.5', '15.6'] %} # check if supported {%- if (grains['os_family'] == 'Suse' and grains['osrelease'] in 
supported_vers) %} @@ -19,6 +19,23 @@ {%- else %} {% set product_name = 'SUSE Manager' %} {%- endif %} + +{% set podman_version = salt['pkg.latest_version']('podman') %} +{% if not podman_version %} + {% set podman_version = salt['pkg.version']('podman') %} +{% endif %} +{% set use_podman = salt['pkg.version_cmp'](podman_version, '4.4.0') >= 0 %} + +{% if use_podman %} +install_podman_for_grafana: + pkg.installed: + - name: podman + +uninstall_grafana_package: + pkg.removed: + - name: grafana +{% endif %} + # setup and enable service /etc/grafana/grafana.ini: file.managed: @@ -26,6 +43,7 @@ - makedirs: True - template: jinja + /etc/grafana/provisioning/datasources/datasources.yml: file.managed: - source: salt://grafana/files/datasources.yml @@ -136,6 +154,29 @@ grafana-sap-netweaver-dashboards: pkg.removed {%- endif %} +{% if use_podman %} +grafana-container: + file.managed: + - names: + - /etc/containers/systemd/grafana.container: + - source: salt://grafana/files/containers/grafana.container + - /etc/containers/systemd/grafana.volume: + - source: salt://grafana/files/containers/grafana.volume + - user: root + - group: root + - mode: 644 + module.run: + - name: service.systemctl_reload + service.running: + - name: grafana + - enable: true + - watch: + - file: /etc/containers/systemd/grafana.* + - file: /etc/grafana/provisioning/datasources/datasources.yml + - file: /etc/grafana/provisioning/dashboards/* + - file: /etc/grafana/grafana.ini + +{% else %} grafana-server: pkg.installed: - names: @@ -146,11 +187,26 @@ grafana-server: - file: /etc/grafana/provisioning/datasources/datasources.yml - file: /etc/grafana/provisioning/dashboards/* - file: /etc/grafana/grafana.ini +{% endif %} {%- else %} # disable service +{% if use_podman %} +grafana-container: + service.dead: + - name: grafana + - enable: false + file.absent: + - names: + - /etc/containers/systemd/grafana.container + - /etc/containers/systemd/grafana.volume + module.run: + - name: service.systemctl_reload + 
+{% else %} grafana-server: service.dead: - enable: False +{% endif %} {%- endif %} {%- endif %} diff --git a/prometheus-formula/metadata/form.yml b/prometheus-formula/metadata/form.yml index 978fe4d..0cb7424 100644 --- a/prometheus-formula/metadata/form.yml +++ b/prometheus-formula/metadata/form.yml @@ -126,6 +126,13 @@ prometheus: $name: Enable local Alertmanager service $help: Install and start local Alertmanager without clustering + alertmanager_config: + $name: Alertmanager configuration + $type: text + $default: /etc/prometheus/alertmanager.yml + $help: Please refer to the documentation for available options. + $visible: this.parent.value.alertmanager_service == true + use_local_alertmanager: $type: boolean $name: Use local Alertmanager diff --git a/prometheus-formula/prometheus/files/alertmanager.yml b/prometheus-formula/prometheus/files/alertmanager.yml new file mode 100644 index 0000000..6708974 --- /dev/null +++ b/prometheus-formula/prometheus/files/alertmanager.yml @@ -0,0 +1,126 @@ +# Sample configuration. +# See https://prometheus.io/docs/alerting/configuration/ for documentation. + +global: + # The smarthost and SMTP sender used for mail notifications. + smtp_smarthost: "localhost:25" + smtp_from: "alertmanager@example.org" + +# The root route on which each incoming alert enters. +route: + # The root route must not have any matchers as it is the entry point for + # all alerts. It needs to have a receiver configured so alerts that do not + # match any of the sub-routes are sent to someone. + receiver: "team-X-mails" + + # The labels by which incoming alerts are grouped together. For example, + # multiple alerts coming in for cluster=A and alertname=LatencyHigh would + # be batched into a single group. + # + # To aggregate by all possible labels use '...' as the sole label name. + # This effectively disables aggregation entirely, passing through all + # alerts as-is. 
This is unlikely to be what you want, unless you have + # a very low alert volume or your upstream notification system performs + # its own grouping. Example: group_by: [...] + group_by: ["alertname", "cluster"] + + # When a new group of alerts is created by an incoming alert, wait at + # least 'group_wait' to send the initial notification. + # This way ensures that you get multiple alerts for the same group that start + # firing shortly after another are batched together on the first + # notification. + group_wait: 30s + + # When the first notification was sent, wait 'group_interval' to send a batch + # of new alerts that started firing for that group. + group_interval: 5m + + # If an alert has successfully been sent, wait 'repeat_interval' to + # resend them. + repeat_interval: 3h + + # All the above attributes are inherited by all child routes and can + # overwritten on each. + + # The child route trees. + routes: + # This routes performs a regular expression match on alert labels to + # catch alerts that are related to a list of services. + - match_re: + service: ^(foo1|foo2|baz)$ + receiver: team-X-mails + + # The service has a sub-route for critical alerts, any alerts + # that do not match, i.e. severity != critical, fall-back to the + # parent node and are sent to 'team-X-mails' + routes: + - match: + severity: critical + receiver: team-X-pager + + - match: + service: files + receiver: team-Y-mails + + routes: + - match: + severity: critical + receiver: team-Y-pager + + # This route handles all alerts coming from a database service. If there's + # no team to handle it, it defaults to the DB team. + - match: + service: database + + receiver: team-DB-pager + # Also group alerts by affected database. + group_by: [alertname, cluster, database] + + routes: + - match: + owner: team-X + receiver: team-X-pager + + - match: + owner: team-Y + receiver: team-Y-pager + +# Inhibition rules allow to mute a set of alerts given that another alert is +# firing. 
+# We use this to mute any warning-level notifications if the same alert is +# already critical. +inhibit_rules: + - source_match: + severity: "critical" + target_match: + severity: "warning" + # Apply inhibition if the alertname is the same. + # CAUTION: + # If all label names listed in `equal` are missing + # from both the source and target alerts, + # the inhibition rule will apply! + equal: ["alertname"] + +receivers: + - name: "team-X-mails" + email_configs: + - to: "team-X+alerts@example.org, team-Y+alerts@example.org" + + - name: "team-X-pager" + email_configs: + - to: "team-X+alerts-critical@example.org" + pagerduty_configs: + - routing_key: + + - name: "team-Y-mails" + email_configs: + - to: "team-Y+alerts@example.org" + + - name: "team-Y-pager" + pagerduty_configs: + - routing_key: + + - name: "team-DB-pager" + pagerduty_configs: + - routing_key: + diff --git a/prometheus-formula/prometheus/files/blackbox.yml b/prometheus-formula/prometheus/files/blackbox.yml new file mode 100644 index 0000000..1ad0c81 --- /dev/null +++ b/prometheus-formula/prometheus/files/blackbox.yml @@ -0,0 +1,51 @@ +modules: + http_2xx: + prober: http + http: + preferred_ip_protocol: "ip4" + http_post_2xx: + prober: http + http: + method: POST + tcp_connect: + prober: tcp + pop3s_banner: + prober: tcp + tcp: + query_response: + - expect: "^+OK" + tls: true + tls_config: + insecure_skip_verify: false + grpc: + prober: grpc + grpc: + tls: true + preferred_ip_protocol: "ip4" + grpc_plain: + prober: grpc + grpc: + tls: false + service: "service1" + ssh_banner: + prober: tcp + tcp: + query_response: + - expect: "^SSH-2.0-" + - send: "SSH-2.0-blackbox-ssh-check" + irc_banner: + prober: tcp + tcp: + query_response: + - send: "NICK prober" + - send: "USER prober prober prober :prober" + - expect: "PING :([^ ]+)" + send: "PONG ${1}" + - expect: "^:[^ ]+ 001" + icmp: + prober: icmp + icmp_ttl5: + prober: icmp + timeout: 5s + icmp: + ttl: 5 diff --git 
a/prometheus-formula/prometheus/files/containers/alertmanager.container b/prometheus-formula/prometheus/files/containers/alertmanager.container new file mode 100644 index 0000000..34d5976 --- /dev/null +++ b/prometheus-formula/prometheus/files/containers/alertmanager.container @@ -0,0 +1,26 @@ +{%- set tls_enabled = salt['pillar.get']('prometheus:tls:enabled', False) %} +{% set config = salt['pillar.get']('prometheus:alerting:alertmanager_config') %} +{%- set entrypoint = ['/usr/bin/prometheus-alertmanager'] %} +{%- if config %} +{% do entrypoint.append('--config.file=' ~ config) %} +{% endif -%} +{%- if tls_enabled %} +{% do entrypoint.append('--web.config.file=' ~ web_config_file) %} +{%- endif -%} + +[Unit] +Description=Alertmanager Container + +[Container] +Label=app=alertmanager +ContainerName=alertmanager +Image=registry.opensuse.org/devel/bci/sle-15-sp6/containerfile/suse/alertmanager:0.26.0 +Volume=/etc/prometheus:/etc/prometheus:ro +Volume=alertmanager.volume:/var/lib/prometheus/alertmanager +{% if entrypoint|length > 1 -%} +PodmanArgs=--entrypoint '{{ entrypoint|tojson }}' +{%- endif %} +PublishPort=9093:9093 + +[Install] +WantedBy=multi-user.target default.target diff --git a/prometheus-formula/prometheus/files/containers/alertmanager.volume b/prometheus-formula/prometheus/files/containers/alertmanager.volume new file mode 100644 index 0000000..3b3198f --- /dev/null +++ b/prometheus-formula/prometheus/files/containers/alertmanager.volume @@ -0,0 +1,5 @@ +[Unit] +Description=Alertmanager Container Volume + +[Volume] +Label=app=alertmanager diff --git a/prometheus-formula/prometheus/files/containers/blackbox_exporter.container b/prometheus-formula/prometheus/files/containers/blackbox_exporter.container new file mode 100644 index 0000000..375a3c4 --- /dev/null +++ b/prometheus-formula/prometheus/files/containers/blackbox_exporter.container @@ -0,0 +1,25 @@ +{%- set tls_enabled = salt['pillar.get']('prometheus:tls:enabled', False) %} +{%- set args = 
salt['pillar.get']('prometheus:blackbox_exporter:args', '').split() %} +{%- set entrypoint = ['/usr/bin/blackbox_exporter'] %} +{%- if args %} +{%- do entrypoint.extend(args) %} +{%- endif -%} +{%- if tls_enabled %} +{%- do entrypoint.append('--web.config.file=' ~ web_config_file) %} +{%- endif -%} + +[Unit] +Description=Blackbox Exporter Container + +[Container] +Label=app=blackbox_exporter +ContainerName=blackbox_exporter +Image=registry.opensuse.org/devel/bci/sle-15-sp6/containerfile/suse/blackbox_exporter:0.24.0 +Volume=/etc/prometheus:/etc/prometheus:ro +{% if entrypoint|length > 1 -%} +PodmanArgs=--entrypoint '{{ entrypoint|tojson }}' +{%- endif %} +PublishPort=9115:9115 + +[Install] +WantedBy=multi-user.target default.target diff --git a/prometheus-formula/prometheus/files/containers/prometheus.container b/prometheus-formula/prometheus/files/containers/prometheus.container new file mode 100644 index 0000000..94476b9 --- /dev/null +++ b/prometheus-formula/prometheus/files/containers/prometheus.container @@ -0,0 +1,25 @@ +{%- set tls_enabled = salt['pillar.get']('prometheus:tls:enabled', False) %} +{%- set entrypoint = ['/usr/bin/prometheus'] %} +{%- if tls_enabled %} +{% do entrypoint.append('--web.config.file=' ~ web_config_file) %} +{%- endif -%} +{% if enable_receiver %} +{% do entrypoint.append('--web.enable-remote-write-receiver') %} +{% endif -%} +[Unit] +Description=Prometheus Container + +[Container] +Label=app=prometheus +ContainerName=prometheus +Image=registry.opensuse.org/devel/bci/sle-15-sp6/containerfile/suse/prometheus:2.37.6 +Volume=/etc/prometheus:/etc/prometheus:ro +Volume=/etc/pki/trust/anchors:/etc/pki/tls/certs:ro +Volume=prometheus.volume:/var/lib/prometheus +PublishPort=9090:9090 +{% if entrypoint|length > 1 -%} +PodmanArgs=--entrypoint '{{ entrypoint|tojson }}' +{%- endif %} + +[Install] +WantedBy=multi-user.target default.target diff --git a/prometheus-formula/prometheus/files/containers/prometheus.volume
b/prometheus-formula/prometheus/files/containers/prometheus.volume new file mode 100644 index 0000000..ebcef67 --- /dev/null +++ b/prometheus-formula/prometheus/files/containers/prometheus.volume @@ -0,0 +1,5 @@ +[Unit] +Description=Prometheus Container Volume + +[Volume] +Label=app=prometheus diff --git a/prometheus-formula/prometheus/files/mgr-server.yml b/prometheus-formula/prometheus/files/mgr-server.yml index e04f13f..8caf539 100644 --- a/prometheus-formula/prometheus/files/mgr-server.yml +++ b/prometheus-formula/prometheus/files/mgr-server.yml @@ -1,10 +1,14 @@ - targets: - - {{ uyuni_server_hostname }}:9100 - - {{ uyuni_server_hostname }}:5556 - - {{ uyuni_server_hostname }}:5557 - - {{ uyuni_server_hostname }}:9800 + - {{ uyuni_server_hostname }}:9100 # Node exporter + - {{ uyuni_server_hostname }}:5556 # Tomcat JMX + - {{ uyuni_server_hostname }}:5557 # Taskomatic JMX + - {{ uyuni_server_hostname }}:9800 # Uyuni exporter labels: {} - targets: - - {{ uyuni_server_hostname }}:9187 + - {{ uyuni_server_hostname }}:80 # Message queue + labels: + __metrics_path__: /rhn/metrics +- targets: + - {{ uyuni_server_hostname }}:9187 # PostgreSQL labels: role: postgres diff --git a/prometheus-formula/prometheus/files/prometheus.yml b/prometheus-formula/prometheus/files/prometheus.yml index 1f987e3..1b65fae 100644 --- a/prometheus-formula/prometheus/files/prometheus.yml +++ b/prometheus-formula/prometheus/files/prometheus.yml @@ -15,7 +15,7 @@ alerting: - {{ alertmanager }} {% endfor %} {% elif local_alertmanager %} - - localhost:9093 + - {{ grains['fqdn'] }}:9093 {% endif %} {% endif %} @@ -38,22 +38,11 @@ scrape_configs: # Monitor {{ uyuni_server_hostname }} # -------------------- - job_name: 'mgr-server' - static_configs: - - targets: - - {{ uyuni_server_hostname }}:9100 # Node exporter - - {{ uyuni_server_hostname }}:5556 # Tomcat JMX - - {{ uyuni_server_hostname }}:5557 # Taskomatic JMX - - {{ uyuni_server_hostname }}:9800 # Uyuni server exporter - labels: {} - - 
targets: - - {{ uyuni_server_hostname }}:80 # Message queue - labels: - __metrics_path__: /rhn/metrics - - targets: - - {{ uyuni_server_hostname }}:9187 # PostgresSQL - labels: - role: postgres + file_sd_configs: + - files: + - mgr-scrape-config/mgr-server.yml {%- endif %} + {% set sd_username = salt['pillar.get']('prometheus:mgr:sd_username') %} {% set sd_password = salt['pillar.get']('prometheus:mgr:sd_password') %} {% set sd_groups = salt['pillar.get']('prometheus:mgr:sd_groups') %} @@ -128,7 +117,7 @@ scrape_configs: - job_name: prometheus static_configs: - targets: - - {{ grains['fqdn'] }}:9090 + - localhost:9090 {% endif %} {% for job, config in salt['pillar.get']('prometheus:federation').items() %} diff --git a/prometheus-formula/prometheus/init.sls b/prometheus-formula/prometheus/init.sls index 6ba2d6a..42ab02b 100644 --- a/prometheus-formula/prometheus/init.sls +++ b/prometheus-formula/prometheus/init.sls @@ -2,6 +2,7 @@ {%- if prometheus %} {%- if salt['pillar.get']('prometheus:enabled', False) %} +{%- set alertmanager_enabled = salt['pillar.get']('prometheus:alerting:alertmanager_service', False) %} # setup Prometheus {%- set monitor_server = salt['pillar.get']('prometheus:mgr:monitor_server', False) %} {%- set alertmanager_service = salt['pillar.get']('prometheus:alerting:alertmanager_service', False) %} @@ -12,10 +13,33 @@ {% set prometheus_web_config_file = '/etc/prometheus/web.yml' %} {% set blackbox_exporter_web_config_file = '/etc/prometheus/exporters/blackbox-web.yml' %} + +{% set podman_version = salt['pkg.latest_version']('podman') %} +{% if not podman_version %} + {% set podman_version = salt['pkg.version']('podman') %} +{% endif %} +{% set use_podman = salt['pkg.version_cmp'](podman_version, '4.4.0') >= 0 %} +{% if use_podman %} +install_podman_for_prometheus: + pkg.installed: + - name: podman + +include: + - prometheus/uninstall_packages + +{% else %} install_prometheus: pkg.installed: - name: {{ prometheus.prometheus_package }} +{%- if 
alertmanager_enabled %} +install_alertmanager: + pkg.installed: + - name: {{ prometheus.alertmanager_package }} +{% endif %} +{% endif %} + + {% set firewall_active = salt['service.available']('firewalld') and salt['service.status']('firewalld') %} {% if firewall_active %} firewall_prometheus: @@ -26,15 +50,11 @@ firewall_prometheus: - prometheus {% endif %} -install_alertmanager: - pkg.installed: - - name: {{ prometheus.alertmanager_package }} - {% set prometheus_version = salt['pkg.latest_version'](prometheus.prometheus_package) %} {% if not prometheus_version %} {% set prometheus_version = salt['pkg.version'](prometheus.prometheus_package) %} {% endif %} -{% if salt['pkg.version_cmp'](prometheus_version, '2.31.0') >= 0 %} +{% if salt['pkg.version_cmp'](prometheus_version, '2.31.0') >= 0 or use_podman %} {% set prometheus_config_template = prometheus.prometheus_config %} {% else %} {% set prometheus_config_template = prometheus.prometheus_config_old %} @@ -47,9 +67,6 @@ config_file: - group: root - mode: 644 - template: jinja - - require: - - pkg: install_prometheus - - pkg: install_alertmanager - defaults: uyuni_server_hostname: {{ uyuni_server_hostname }} @@ -62,9 +79,6 @@ prometheus_web_config: - group: root - mode: 644 - template: jinja - - require: - - pkg: install_prometheus - - pkg: install_alertmanager {% endif %} {% if default_rules %} @@ -79,9 +93,6 @@ default_rule_files: - group: root - mode: 644 - makedirs: True - - require: - - pkg: install_prometheus - - pkg: install_alertmanager {% endif %} {%- if monitor_server %} @@ -94,13 +105,40 @@ mgr_scrape_config_file: - mode: 644 - makedirs: True - template: jinja - - require: - - pkg: install_prometheus - - pkg: install_alertmanager - defaults: uyuni_server_hostname: {{ uyuni_server_hostname }} {%- endif %} +{%- if use_podman %} +prometheus_container_running: + file.managed: + - names: + - /etc/containers/systemd/prometheus.container: + - source: salt://prometheus/files/containers/prometheus.container + - 
/etc/containers/systemd/prometheus.volume: + - source: salt://prometheus/files/containers/prometheus.volume + - user: root + - group: root + - mode: 644 + - template: jinja + - defaults: + web_config_file: {{ prometheus_web_config_file }} + enable_receiver: {{ remote_write_receiver_enabled }} + module.run: + - name: service.systemctl_reload + service.running: + - name: {{ prometheus.prometheus_service }} + - enable: true + - watch: + - file: /etc/containers/systemd/prometheus.* + - file: config_file +{% if tls_enabled %} + - file: prometheus_web_config +{% endif %} +{% if default_rules %} + - file: default_rule_files +{% endif %} +{% else %} prometheus_running: file.managed: - name: /etc/systemd/system/prometheus.service.d/uyuni.conf @@ -143,7 +181,55 @@ prometheus_running: {%- if monitor_server %} - file: mgr_scrape_config_file {%- endif %} +{% endif %} + +{% if alertmanager_enabled %} +alertmanager_config_file: + file.managed: + - name: /etc/prometheus/alertmanager.yml + - source: salt://prometheus/files/alertmanager.yml + - user: root + - group: root + - mode: 644 +{% endif %}{%- if use_podman %} +alertmanager_container_running: +{% if alertmanager_service %} + file.managed: + - names: + - /etc/containers/systemd/alertmanager.container: + - source: salt://prometheus/files/containers/alertmanager.container + - /etc/containers/systemd/alertmanager.volume: + - source: salt://prometheus/files/containers/alertmanager.volume + - user: root + - group: root + - mode: 644 + - template: jinja + - defaults: + web_config_file: {{ prometheus_web_config_file }} + module.run: + - name: service.systemctl_reload + service.running: + - name: alertmanager + - enable: true + - watch: + - file: /etc/containers/systemd/alertmanager.* +{% if tls_enabled %} + - file: prometheus_web_config +{%- endif %} +{% else %} + service.dead: + - name: alertmanager + - enable: False + file.absent: + - names: + - /etc/containers/systemd/alertmanager.container + - /etc/containers/systemd/alertmanager.volume + 
module.run: + - name: service.systemctl_reload +{% endif %} + +{%- else %} alertmanager_running: {% if alertmanager_service %} file.managed: @@ -171,8 +257,9 @@ alertmanager_running: - name: {{ prometheus.alertmanager_service }} - enable: False {%- endif %} +{% endif %} -{% if alertmanager_service and firewall_active %} +{% if alertmanager_enabled and firewall_active %} alertmanager_service: firewalld.service: - name: prometheus-alertmanager @@ -186,9 +273,19 @@ firewall_alertmanager: - services: - prometheus-alertmanager {% endif %} +{#- alertmanager_enabled is closed right after alertmanager_config_file above -#} {% set blackbox_exporter_enabled = salt['pillar.get']('prometheus:blackbox_exporter:enabled', False) %} -{% if blackbox_exporter_enabled and tls_enabled %} +{%- if blackbox_exporter_enabled %} +blackbox_exporter_config_file: + file.managed: + - name: /etc/prometheus/blackbox.yml + - source: salt://prometheus/files/blackbox.yml + - user: root + - group: root + - mode: 644 + +{% if tls_enabled %} blackbox_exporter_web_config: file.managed: - name: {{ blackbox_exporter_web_config_file }} @@ -201,7 +298,41 @@ blackbox_exporter_web_config: - watch_in: - service: blackbox_exporter {% endif %} +{% endif %} + +{%- if use_podman %} +blackbox_exporter_container: +{%- if blackbox_exporter_enabled %} + file.managed: + - name: /etc/containers/systemd/blackbox_exporter.container + - source: salt://prometheus/files/containers/blackbox_exporter.container + - user: root + - group: root + - mode: 644 + - template: jinja + - defaults: + web_config_file: {{ blackbox_exporter_web_config_file }} + module.run: + - name: service.systemctl_reload + service.running: + - name: blackbox_exporter + - enable: true + - watch: + - file: /etc/containers/systemd/blackbox_exporter.container +{% if tls_enabled %} + - file: {{ blackbox_exporter_web_config_file }} +{%- endif %} +{% else %} + service.dead: + - name: blackbox_exporter + - enable: False + file.absent: + - name: /etc/containers/systemd/blackbox_exporter.container + module.run: + - name: service.systemctl_reload +{% endif 
%} +{% else %} blackbox_exporter: {% if blackbox_exporter_enabled %} {% set blackbox_exporter_args = salt['pillar.get']('prometheus:blackbox_exporter:args') %} @@ -241,22 +372,17 @@ blackbox_exporter: - name: {{ prometheus.blackbox_exporter_service }} - enable: False {% endif %} +{% endif %} {%- else %} # remove prometheus -remove_prometheus: - pkg.removed: - - name: {{ prometheus.prometheus_package }} - -remove_alertmanager: - pkg.removed: - - name: {{ prometheus.alertmanager_package }} +include: + - prometheus/uninstall_packages /etc/prometheus: file.absent: - require: - - pkg: remove_prometheus - - pkg: remove_alertmanager + - remove_prometheus_packages {%- endif %} {%- endif %} diff --git a/prometheus-formula/prometheus/uninstall_packages.sls b/prometheus-formula/prometheus/uninstall_packages.sls new file mode 100644 index 0000000..5683229 --- /dev/null +++ b/prometheus-formula/prometheus/uninstall_packages.sls @@ -0,0 +1,8 @@ +{% from "prometheus/map.jinja" import prometheus with context %} + +remove_prometheus_packages: + pkg.removed: + - pkgs: + - {{ prometheus.prometheus_package }} + - {{ prometheus.alertmanager_package }} + - {{ prometheus.blackbox_exporter_package }}