Created
January 22, 2018 19:10
-
-
Save infinisil/4e92c44a15c7df68fefe6a4e4e8c7d3f to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ config, lib, pkgs, ... }:

with lib;

let
  # Ceph package the services below execute; taken from the configured pkgs set.
  inherit (pkgs) ceph;
  # Shorthand for this module's option subtree.
  cfg = config.services.ceph;
  # Rewrites "camelCaseOptions" into "camel case options" so NixOS-style
  # option names can be rendered as ceph.conf keys
  # (credits to tilpner in #nixos@freenode).
  translateOption = replaceStrings upperChars (map (c: " ${c}") lowerChars);
in
{
  options.services.ceph = {
    # Ceph has a monolithic configuration file but different sections for
    # each daemon, a separate client section and a global section.
    enable = mkEnableOption "Ceph global configuration";

    global = {
      fsid = mkOption {
        type = types.str;
        description = ''
          Filesystem ID, a generated uuid. It must be generated and set before
          attempting to start a cluster.
        '';
      };
      clusterName = mkOption {
        type = types.str;
        default = "ceph";
        description = ''
          Name of cluster
        '';
      };
      monInitialMembers = mkOption {
        type = with types; nullOr commas;
        default = null;
        # `types.commas` is a comma-separated *string*, not a list.
        example = "node0, node1, node2";
        description = ''
          List of hosts that will be used as monitors at startup.
        '';
      };
      maxOpenFiles = mkOption {
        type = types.int;
        default = 131072;
        description = ''
          Max open files for each OSD daemon.
        '';
      };
      authClusterRequired = mkOption {
        type = types.enum [ "cephx" "none" ];
        default = "cephx";
        description = ''
          Enables requiring daemons to authenticate with each other in the cluster.
        '';
      };
      authServiceRequired = mkOption {
        type = types.enum [ "cephx" "none" ];
        default = "cephx";
        description = ''
          Enables requiring clients to authenticate with the cluster to access services in the cluster (e.g. radosgw, mds or osd).
        '';
      };
      authClientRequired = mkOption {
        type = types.enum [ "cephx" "none" ];
        default = "cephx";
        description = ''
          Enables requiring the cluster to authenticate itself to the client.
        '';
      };
      publicNetwork = mkOption {
        type = with types; nullOr commas;
        default = null;
        description = ''
          A comma-separated list of subnets that will be used as public networks in the cluster.
        '';
      };
      clusterNetwork = mkOption {
        type = with types; nullOr commas;
        default = null;
        description = ''
          A comma-separated list of subnets that will be used as cluster networks in the cluster.
        '';
      };
    };

    mon = {
      enable = mkEnableOption "Ceph MON daemon";
      daemons = mkOption {
        type = with types; listOf str;
        # Default to the empty list so configurations with MON disabled still
        # evaluate; the assertion in `config` enforces a non-empty list when
        # MON is enabled.
        default = [];
        example = [ "0" "1" ];
        description = ''
          A list of monitor daemons that should have services created for them.
        '';
      };
      extraConfig = mkOption {
        # `nullOr attrsOf str` would parse as `(nullOr attrsOf) str`; the
        # element type must be parenthesized.
        type = with types; nullOr (attrsOf str);
        default = null;
        description = ''
          Extra configuration to add to the monitor section.
        '';
      };
    };

    osd = {
      enable = mkEnableOption "Ceph OSD daemon";
      daemons = mkOption {
        type = with types; listOf str;
        default = [];
        example = [ "0" "1" ];
        description = ''
          A list of OSD daemons that should have services created for them.
        '';
      };
      extraConfig = mkOption {
        type = with types; nullOr (attrsOf str);
        default = {
          "osd journal size" = "10000";
          "osd pool default size" = "3";
          "osd pool default min size" = "2";
          "osd pool default pg num" = "200";
          "osd pool default pgp num" = "200";
          "osd crush chooseleaf type" = "1";
        };
        description = ''
          Extra configuration to add to the OSD section.
        '';
      };
    };

    mds = {
      enable = mkEnableOption "Ceph MDS daemon";
      daemons = mkOption {
        type = with types; listOf str;
        default = [];
        example = [ "0" "1" ];
        description = ''
          A list of metadata service daemons that should have services created for them.
        '';
      };
      extraConfig = mkOption {
        type = with types; nullOr (attrsOf str);
        default = null;
        description = ''
          Extra configuration to add to the MDS section.
        '';
      };
    };

    rgw = {
      enable = mkEnableOption "Ceph RadosGW daemon";
    };

    client = {
      enable = mkEnableOption "Ceph client configuration";
      extraConfig = mkOption {
        # Each attribute is a complete extra ceph.conf section, so the value
        # type is an attrset of sections (see the example), not a flat
        # attrset of strings.
        type = with types; nullOr (attrsOf (attrsOf str));
        default = null;
        example = literalExample ''
          {
            "client.radosgw.node0" = { "some config option" = "true"; };
          }
        '';
        description = ''
          Extra configuration to add to the client section. Configuration for rados gateways
          would be added here, with their own sections, see example.
        '';
      };
    };
  };
config = mkIf config.services.ceph.enable (mkMerge [ | |
{ | |
assertions = [ | |
{ assertion = cfg.global.fsid != ""; | |
message = "fsid has to be set to a valid uuid for the cluster to function"; | |
} | |
{ assertion = cfg.mon.enable == true && cfg.mon.daemons == []; | |
message = "have to set id of atleast one MON if you're going to enable Monitor"; | |
} | |
{ assertion = cfg.mds.enable == true && cfg.mds.daemons == []; | |
message = "have to set id of atleast one MDS if you're going to enable Metadata Service"; | |
} | |
{ assertion = cfg.osd.enable == true && cfg.osd.daemons == []; | |
message = "have to set id of atleast one OSD if you're going to enable OSD"; | |
} | |
]; | |
warnings = [ | |
{ warning = cfg.monInitialMembers == null; | |
message = "Not setting up a list of members in monInitialMembers requires that you set the host variable for each mon daemon or else the cluster won't function"; | |
} | |
]; | |
users.extraUsers = singleton { | |
name = "ceph"; | |
uid = config.ids.uids.ceph; | |
description = "Ceph daemon user"; | |
}; | |
users.extraGroups = singleton { | |
name = "ceph"; | |
gid = config.ids.gids.ceph; | |
}; | |
} | |
(mkIf cfg.rgw.enable { | |
systemd.services."ceph-rgw" = mkIf cfg.rgw.enable { | |
description = "Ceph RadosGW daemon"; | |
after = [ "network-online.target" "local-fs.target" "time-sync.target" ]; | |
wants = [ "network-online.target" "local-fs.target" "time-sync.target" ]; | |
partOf = [ "ceph-rgw.target" ]; | |
wantedBy = [ "ceph-rgw.target" ]; | |
path = ceph; | |
serviceConfig = { | |
LimitNOFILE = 1048576; | |
LimitNPROC = 1048576; | |
Environment = "CLUSTER=${cfg.clusterName}"; | |
ExecStart = "${ceph}/bin/radosgw -f --cluster ${cfg.clusterName} --id client.%i --setuser ceph --setgroup ceph"; | |
ExecReload = "/bin/kill -HUP $MAINPID"; | |
PrivateDevices = "yes"; | |
PrivateTmp = "true"; | |
ProtectHome = "true"; | |
ProtectSystem = "full"; | |
Restart = "on-failure"; | |
StartLimitInterval = "30min"; | |
StartLimitBurst = "5"; | |
User = "ceph"; | |
Group = "ceph"; | |
}; | |
}; | |
}) | |
(mkIf cfg.mon.enable (mkMerge (flip map cfg.mon.daemons (name: { | |
systemd.services."ceph-mon@${name}" = mkIf cfg.mon.enable { | |
description = "Ceph MON daemon ${name}"; | |
after = [ "network-online.target" "local-fs.target" "time-sync.target" ]; | |
wants = [ "network-online.target" "local-fs.target" "time-sync.target" ]; | |
partOf = [ "ceph-mon.target" ]; | |
wantedBy = [ "ceph-mon.target" ]; | |
path = ceph; | |
serviceConfig = { | |
LimitNOFILE = 1048576; | |
LimitNPROC = 1048576; | |
Environment = "CLUSTER=${cfg.clusterName}"; | |
ExecStart = "${ceph}/bin/ceph-mon -f --cluster ${cfg.clusterName} --id %i --setuser ceph --setgroup ceph"; | |
ExecReload = "/bin/kill -HUP $MAINPID"; | |
PrivateDevices = "yes"; | |
PrivateTmp = "true"; | |
ProtectHome = "true"; | |
ProtectSystem = "full"; | |
Restart = "on-failure"; | |
StartLimitInterval = "30min"; | |
StartLimitBurst = "5"; | |
RestartSec = "10"; | |
User = "ceph"; | |
Group = "ceph"; | |
}; | |
}; | |
})))) | |
(mkIf cfg.mds.enable (mkMerge (flip map cfg.mds.daemons (name: { | |
systemd.services."ceph-mds@${name}" = mkIf cfg.mds.enable { | |
description = "Ceph MDS daemon ${name}"; | |
after = [ "network-online.target" "local-fs.target" "time-sync.target" ]; | |
wants = [ "network-online.target" "local-fs.target" "time-sync.target" ]; | |
partOf = [ "ceph-mds.target" ]; | |
wantedBy = [ "ceph-mds.target" ]; | |
path = ceph; | |
serviceConfig = { | |
LimitNOFILE = 1048576; | |
LimitNPROC = 1048576; | |
Environment = "CLUSTER=${cfg.clusterName}"; | |
ExecStart = "${ceph}/bin/ceph-mds -f --cluster ${cfg.clusterName} --id %i --setuser ceph --setgroup ceph"; | |
ExecReload = "/bin/kill -HUP $MAINPID"; | |
PrivateDevices = "yes"; | |
PrivateTmp = "true"; | |
ProtectHome = "true"; | |
ProtectSystem = "full"; | |
Restart = "on-failure"; | |
StartLimitInterval = "30min"; | |
StartLimitBurst = "3"; | |
User = "ceph"; | |
Group = "ceph"; | |
}; | |
}; | |
})))) | |
(mkIf cfg.osd.enable (mkMerge (flip map cfg.osd.daemons (name: { | |
systemd.services."ceph-osd@${name}" = mkIf cfg.osd.enable { | |
description = "Ceph OSD daemon ${name}"; | |
after = [ "network-online.target" "local-fs.target" "time-sync.target" "ceph-mon.target" ]; | |
wants = [ "network-online.target" "local-fs.target" "time-sync.target" ]; | |
partOf = [ "ceph-osd.target" ]; | |
wantedBy = [ "ceph-osd.target" ]; | |
path = ceph; | |
serviceConfig = { | |
LimitNOFILE = 1048576; | |
LimitNPROC = 1048576; | |
Environment = "CLUSTER=${cfg.clusterName}"; | |
ExecPreStart = "${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh --id %i --cluster ${cfg.clusterName}"; | |
ExecStart = "${ceph}/bin/ceph-osd -f --cluster ${cfg.clusterName} --id %i --setuser ceph --setgroup ceph"; | |
ExecReload = "/bin/kill -HUP $MAINPID"; | |
PrivateDevices = "yes"; | |
PrivateTmp = "true"; | |
ProtectHome = "true"; | |
ProtectSystem = "full"; | |
Restart = "on-failure"; | |
RestartSec = "20s"; | |
StartLimitInterval = "30min"; | |
StartLimitBurst = "30"; | |
User = "ceph"; | |
Group = "ceph"; | |
}; | |
}; | |
})))) | |
]); | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment