@xvello
Last active May 17, 2024 22:36
Run the Datadog agent on NixOS with podman containers
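# Usage sketch (assumptions: this file is saved as ./datadog-podman.nix, and an
# api_key file is provisioned under /var/lib/secrets/datadog out of band):
#   imports = [ ./datadog-podman.nix ];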
{ config, lib, pkgs, ... }:
let
  # Reuse the main module's config hierarchy for simplicity
  cfg = config.services.datadog-agent;
  ddConf = {
    # Read secrets from files; make sure to strip trailing newlines.
    # Alternatively, add an environmentFiles entry to the container to set DD_API_KEY.
    secret_backend_command = "/readsecret.sh";
    secret_backend_arguments = "/run/secrets";
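    # Expected layout (a sketch, adjust to your own secret management): each
    # "ENC[handle]" value is resolved by /readsecret.sh (provided by the agent
    # image) reading the file named after the handle under /run/secrets, e.g.
    #   /var/lib/secrets/datadog/api_key  ->  mounted as /run/secrets/api_key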
    api_key = "ENC[api_key]";
    # My common options, change as needed.
    # config.services.datadog-agent.extraConfig is merged in to allow
    # per-machine overrides (see the example below this attribute set).
    hostname_fqdn = true;
    log_level = "warn";
    log_format_json = true;
    disable_file_logging = true;
    logs_enabled = true;
    logs_config = {
      use_tcp = false;
      use_http = true;
    };
  } // cfg.extraConfig;
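  # Per-host override example (a sketch, values are illustrative): another
  # host's configuration can set
  #   services.datadog-agent.extraConfig = { tags = [ "env:prod" "role:db" ]; };
  # and it will be merged into ddConf above via `// cfg.extraConfig`.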
  mainConfigFile = pkgs.writeText "datadog.yaml" (builtins.toJSON ddConf);

  # Creates one yaml config file per entry in config.services.datadog-agent.checks
  # and mounts it in the agent container.
  checkConfigVolumes = lib.mapAttrsToList
    (name: value:
      "${pkgs.writeText "${name}.yaml" (builtins.toJSON value)}:/etc/datadog-agent/conf.d/${name}.yaml:ro")
    cfg.checks;
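  # For example, the "disk" check declared below renders to a mount roughly like
  #   /nix/store/<hash>-disk.yaml:/etc/datadog-agent/conf.d/disk.yaml:ro
  # (the store hash is illustrative).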
  # Container template used by both agent containers.
  # Instead of using the container's s6 init to spawn the different agents,
  # they are started directly in separate containers, mirroring the k8s chart:
  # https://github.com/DataDog/helm-charts/tree/main/charts/datadog/templates
  datadog-container = {
    image = "datadog/agent:7.33.1";
    autoStart = true;
    volumes = [
      "/etc/resolv.conf:/etc/resolv.conf:ro" # Fix for DNS resolution issues at startup
      "/proc/:/host/proc/:ro"
      "/sys/fs/cgroup/:/host/sys/fs/cgroup:ro"
      "/var/lib/secrets/datadog:/run/secrets:ro" # Where the secrets are stored
      "${mainConfigFile}:/etc/datadog-agent/datadog.yaml:ro"
    ];
    user = "root"; # Needed for secret_backend_command, otherwise you can use dd-agent
    extraOptions = [
      "--network=host" # Allows collecting host network metrics
      "--no-healthcheck" # Could not get the healthcheck to work reliably
    ];
    environment = {
      DD_APM_ENABLED = "false";
      DD_PROCESS_AGENT_ENABLED = "true";
    };
  };
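  # Note: `//` is a shallow attribute-set merge, so a container that overrides
  # `volumes` (as datadog-agent does below) must restate the full list; that is
  # why it starts from datadog-container.volumes and appends to it.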
in
{
  # Common checks, more can be set up per host (see the example below).
  services.datadog-agent.checks = {
    journald.logs = [{ type = "journald"; }];
    disk.instances = [{ file_system_include = [ "ext4$" ]; }];
    network.instances = [{ collect_connection_state = false; excluded_interfaces = [ "lo" "lo0" ]; }];
  };
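  # Per-host example (a sketch, values are illustrative): another host can
  # declare additional checks, e.g.
  #   services.datadog-agent.checks.postgres.instances = [{ host = "localhost"; port = 5432; }];
  # and a matching config file will be generated and mounted automatically.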
  # Make sure the network stays up so that the last payloads are sent.
  systemd.services.podman-datadog-agent.after = [ "network.target" ];
  systemd.services.podman-process-agent.after = [ "network.target" ];

  virtualisation = {
    # Podman configuration
    podman = {
      enable = true;
      dockerCompat = true;
    };
    oci-containers.backend = "podman";
    oci-containers.containers = {
      # Main agent for metrics and logs
      datadog-agent = datadog-container // {
        entrypoint = "agent";
        cmd = [ "run" ];
        volumes = datadog-container.volumes ++ checkConfigVolumes ++ [
          "datadog-agent-run:/opt/datadog-agent/run:rw" # Keep log tailing state in a podman volume
          "/var/log/journal:/var/log/journal:ro" # journald integration
          "/etc/machine-id:/etc/machine-id:ro" # journald integration
        ];
      };
      # Live processes / containers agent
      process-agent = datadog-container // {
        entrypoint = "process-agent";
      };
    };
  };
}