Skip to content

Instantly share code, notes, and snippets.

@fatmcgav
Last active February 15, 2016 15:47
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save fatmcgav/4ec7b44465c97d246f74 to your computer and use it in GitHub Desktop.
Save fatmcgav/4ec7b44465c97d246f74 to your computer and use it in GitHub Desktop.
Invalid relationship when using hiera_resources
# Class: base::software::pacemaker
#
# Sets up corosync/pacemaker and (optionally) creates pacemaker resources
# declared in hiera via the custom hiera_resources() function.
#
# Parameters:
#   [*pacemaker_resource_key*]
#     Hiera key holding the hash of pacemaker resources to create.
#     When empty (the default) no resources are created.
#   [*pacemaker_resource_defaults_key*]
#     Hiera key holding per-resource-type default parameters, passed as the
#     second argument to hiera_resources(). Defaults to 'pacemaker_defaults'.
class base::software::pacemaker (
$pacemaker_resource_key = '',
$pacemaker_resource_defaults_key = 'pacemaker_defaults'
) {
# Contain the corosync class so its internal ordering is preserved
# relative to this class in the catalog graph.
contain ::corosync
# Create required pacemaker resources if required
if !empty($pacemaker_resource_key) {
# Get a hash of default values from hiera (empty hash when key absent)
$pacemaker_resource_defaults = hiera_hash($pacemaker_resource_defaults_key, {})
# NOTE(review): debug aid only — consider removing once resolved.
notify { 'pacemaker':
message => "Looking for hiera key: ${pacemaker_resource_key}"
}
#file { '/tmp/pacemaker-resources.json':
# ensure => present,
# content => sorted_json($pacemaker_resources)
#}
# Create the resources from hiera_hash lookup
hiera_resources($pacemaker_resource_key, $pacemaker_resource_defaults)
}
}
[vagrant@puppet production]$ sudo hiera -h nfs_pacemaker_resources ::clientcert=nfs01.lt0772.test ::service=ipa ::environment=dev -d
WARN: Mon Feb 15 15:12:54 +0000 2016: Cannot load backend module_data: no such file to load -- hiera/backend/module_data_backend
DEBUG: Mon Feb 15 15:12:54 +0000 2016: Hiera YAML backend starting
DEBUG: Mon Feb 15 15:12:54 +0000 2016: Looking up nfs_pacemaker_resources in YAML backend
DEBUG: Mon Feb 15 15:12:54 +0000 2016: Looking for data source nodes/nfs01.lt0772.test
DEBUG: Mon Feb 15 15:12:54 +0000 2016: Looking for data source services/ipa
DEBUG: Mon Feb 15 15:12:54 +0000 2016: Found nfs_pacemaker_resources in services/ipa
DEBUG: Mon Feb 15 15:12:54 +0000 2016: Looking for data source common/common
{"cs_commit"=>{"add_drbd_nfs"=>{}},
"cs_primitive"=>
{"p_nfs_service"=>
{"operations"=>{"monitor"=>{"interval"=>"10s"}},
"primitive_type"=>"nfs-server",
"require"=>["Class['::nfs::server']", "Cs_primitve['p_nfs_drbd_mount']"],
"primitive_class"=>"systemd"},
"p_nfs_drbd_data"=>
{"ms_metadata"=>
{"clone-node-max"=>"2",
"master-node-max"=>"1",
"clone-max"=>"2",
"master-max"=>"1",
"notify"=>"true"},
"operations"=>{"monitor"=>{"interval"=>"60s"}},
"primitive_type"=>"drbd",
"require"=>"Drbd::Resource['drbd0']",
"promotable"=>true,
"parameters"=>{"drbd_resource"=>"drbd0"},
"provided_by"=>"linbit"},
"p_nfs_exportfs_home"=>
{"operations"=>{"monitor"=>{"interval"=>"10s"}},
"primitive_type"=>"exportfs",
"require"=>"Cs_primitve['p_nfs_service']",
"parameters"=>
{"directory"=>"/mnt/nfs_data",
"fsid"=>"1",
"options"=>"rw,async,no_root_squash,no_all_squash,no_subtree_check",
"clientspec"=>"192.168.250.0/255.255.255.0"},
"provided_by"=>"heartbeat"},
"p_nfs_vip"=>
{"operations"=>{"monitor"=>{"interval"=>"10s"}},
"primitive_type"=>"IPaddr2",
"parameters"=>{"cidr_netmask"=>24, "ip"=>""},
"provided_by"=>"heartbeat"},
"p_nfs_drbd_mount"=>
{"operations"=>{"monitor"=>{"interval"=>"10s"}},
"primitive_type"=>"Filesystem",
"require"=>"Cs_primitive['p_nfs_drbd_data']",
"parameters"=>
{"directory"=>"/mnt/nfs_data",
"options"=>"noatime,nodiratime",
"device"=>"/dev/drbd0",
"fstype"=>"ext4"},
"provided_by"=>"heartbeat"}},
"cs_group"=>
{"g_nfs"=>
{"primitives"=>
["p_nfs_vip",
"p_nfs_service",
"p_nfs_drbd_mount",
"p_nfs_exportfs_home"],
"cib"=>"add_drbd_nfs",
"notify"=>"Cs_commit['add_drbd_nfs']"}},
"cs_order"=>
{"o_drbd_before_nfs"=>
{"require"=>"Cs_group['g_nfs']",
"first"=>"ms_p_nfs_drbd_data:promote",
"cib"=>"add_drbd_nfs",
"notify"=>"Cs_commit['add_drbd_nfs']",
"second"=>"g_nfs:start"}},
"cs_colocation"=>
{"c_nfs_with_drbd"=>
{"require"=>"Cs_group['g_nfs']",
"primitives"=>["g_nfs", "ms_p_nfs_drbd_data:Master"],
"cib"=>"add_drbd_nfs",
"notify"=>"Cs_commit['add_drbd_nfs']"}},
"cs_property"=>
{"stonith-enabled"=>{"value"=>"false"},
"no-quorum-policy"=>{"value"=>"ignore"}},
"cs_shadow"=>{"add_drbd_nfs"=>{}}}
nfs_pacemaker_resources:
cs_property:
no-quorum-policy:
value: 'ignore'
stonith-enabled:
value: 'false'
cs_commit:
add_drbd_nfs: {}
cs_shadow:
add_drbd_nfs: {}
cs_primitive:
p_nfs_vip:
primitive_type: 'IPaddr2'
provided_by: 'heartbeat'
parameters:
ip: "%{::nfs_ip}"
cidr_netmask: 24
operations:
monitor:
interval: '10s'
p_nfs_drbd_data:
primitive_type: 'drbd'
provided_by: 'linbit'
parameters:
drbd_resource: 'drbd0'
operations:
monitor:
interval: '60s'
promotable: true
ms_metadata:
master-max: '1'
master-node-max: '1'
clone-max: '2'
clone-node-max: '2'
notify: 'true'
require: "Drbd::Resource['drbd0']"
p_nfs_drbd_mount:
primitive_type: 'Filesystem'
provided_by: 'heartbeat'
parameters:
device: '/dev/drbd0'
directory: '/mnt/nfs_data'
fstype: 'ext4'
options: 'noatime,nodiratime'
operations:
monitor:
interval: '10s'
require: "Cs_primitive['p_nfs_drbd_data']"
p_nfs_service:
primitive_class: 'systemd'
primitive_type: 'nfs-server'
operations:
monitor:
interval: '10s'
require:
- "Class['::nfs::server']"
- "Cs_primitive['p_nfs_drbd_mount']"
p_nfs_exportfs_home:
primitive_type: 'exportfs'
provided_by: 'heartbeat'
parameters:
fsid: '1'
directory: '/mnt/nfs_data'
options: 'rw,async,no_root_squash,no_all_squash,no_subtree_check'
clientspec: '192.168.250.0/255.255.255.0'
operations:
monitor:
interval: '10s'
require: "Cs_primitive['p_nfs_service']"
cs_group:
g_nfs:
cib: 'add_drbd_nfs'
primitives:
- 'p_nfs_vip'
- 'p_nfs_service'
- 'p_nfs_drbd_mount'
- 'p_nfs_exportfs_home'
notify: "Cs_commit['add_drbd_nfs']"
cs_colocation:
c_nfs_with_drbd:
cib: 'add_drbd_nfs'
primitives:
- 'g_nfs'
- 'ms_p_nfs_drbd_data:Master'
require: "Cs_group['g_nfs']"
notify: "Cs_commit['add_drbd_nfs']"
cs_order:
o_drbd_before_nfs:
cib: 'add_drbd_nfs'
first: 'ms_p_nfs_drbd_data:promote'
second: 'g_nfs:start'
require: "Cs_group['g_nfs']"
notify: "Cs_commit['add_drbd_nfs']"
Error: Could not retrieve catalog from remote server: Error 400 on SERVER: Invalid relationship: Cs_group[g_nfs] { notify => Cs_commit['add_drbd_nfs'] }, because Cs_commit['add_drbd_nfs'] doesn't seem to be in the catalog
Warning: Not using cache on failed catalog
Error: Could not retrieve catalog; skipping run
pacemaker_defaults:
cs_property:
require: "Service['pacemaker']"
before: "Cs_shadow['add_drbd_nfs']"
cs_primitive:
primitive_class: 'ocf'
cib: 'add_drbd_nfs'
require: "Cs_shadow['add_drbd_nfs']"
notify: "Cs_commit['add_drbd_nfs']"
# hiera_resources - A Hiera wrapper for Puppet's create_resources function
#
# Usage: hiera_resources(<hiera key> [, <defaults hash>])
#
# Looks up <hiera key> via hiera_hash; the result must be shaped as
#   { 'resource_type' => { 'title' => { 'param' => value, ... }, ... }, ... }
# The optional 2nd argument is a hash of per-resource-type default
# parameters, merged into every resource of that type. Explicit values
# from hiera take precedence over defaults.
require 'puppet/version'
Puppet::Parser::Functions.newfunction(:hiera_resources) do |args|
  # Raise a Puppet::Error with the given message (aborts the catalog compile).
  def error(message)
    raise Puppet::Error, message
  end

  file_name = File.basename(__FILE__, File.extname(__FILE__))
  error("%s requires at least 1 argument" % [file_name]) if args.empty?
  if args[1]
    error("%s expects a hash as the 2nd argument; got %s" % [file_name, args[1].class]) unless args[1].is_a? Hash
  end
  debug "Args = #{args.inspect}"

  # FIX: tolerate a missing 2nd argument — the original 3.x branch crashed
  # with NoMethodError on `args[1].keys` when only one argument was given.
  defaults = args[1] || {}

  # Fetch the resource hash from hiera. Puppet 4+ exposes call_function;
  # 3.x uses the legacy function_* dispatch.
  # FIX: version test was `=~ /^4/`, which silently fell into the 3.x
  # branch on Puppet 5 and later.
  resource_hash =
    if Puppet.version.to_i >= 4
      call_function('hiera_hash', args)
    else
      debug 'Running on Puppet 3.x'
      function_hiera_hash(args)
    end

  resource_hash.each do |type, resources|
    debug "Type = #{type.inspect}, Resources = #{resources.inspect}"
    resources.each do |title, parameters|
      debug "Title = #{title.inspect}, parameters = #{parameters.inspect}"
      # Allow resources without parameters (aka default parameters)
      resources[title] = {} if parameters.nil?
      # Apply per-type defaults in BOTH branches.
      # FIX: the Puppet 4 path previously ignored defaults entirely, and the
      # 3.x path used `resources[title].merge!(defaults)`, letting defaults
      # clobber explicit hiera values. Merging defaults first means explicit
      # parameters win.
      if defaults.key?(type)
        debug "Applying defaults for #{type}: #{defaults[type].inspect}"
        resources[title] = defaults[type].merge(resources[title])
        debug "Merged resource looks like: #{resources[title].inspect}"
      end
    end
    # function_create_resources isn't callable directly from here, so look
    # up the generated method name and dispatch manually.
    method = Puppet::Parser::Functions.function :create_resources
    send(method, [type, resources])
  end
end
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment