diff --git a/lib/facter/gluster.rb b/lib/facter/gluster.rb
index 37ae6bce..0bd30207 100644
--- a/lib/facter/gluster.rb
+++ b/lib/facter/gluster.rb
@@ -1,6 +1,11 @@
+# vim: syntax=ruby tabstop=2 softtabstop=2 shiftwidth=2
+
+require 'rexml/document'
+
+gluster_peers = {}
+gluster_volumes = {}
peer_count = 0
peer_list = ''
-volume_bricks = {}
volume_options = {}
volume_ports = {}
@@ -19,40 +24,87 @@
end
if binary
- # the Gluster binary command to use
+ # Gluster facts don't make sense if the Gluster binary isn't present
+
+ # The Gluster binary command to use
Facter.add(:gluster_binary) do
setcode do
binary
end
end
- output = Facter::Util::Resolution.exec("#{binary} peer status")
- peer_count = Regexp.last_match[1].to_i if output =~ %r{^Number of Peers: (\d+)$}
- if peer_count > 0
- peer_list = output.scan(%r{^Hostname: (.+)$}).flatten.join(',')
- other_names = output.scan(%r{^Other names:\n((.+\n)+)}).flatten.join.scan(%r{(.+)\n?}).sort.uniq.flatten.join(',')
- peer_list += ',' + other_names if other_names
+
+ # Get our peer information from gluster peer status --xml (Code credit to github user: coder-hugo)
+ peer_status_xml = REXML::Document.new(Facter::Util::Resolution.exec("#{binary} peer status --xml"))
+ REXML::XPath.match(peer_status_xml, '/cliOutput/peerStatus/peer').each do |peer_xml|
+ # Get the peer hostname
+ peer = peer_xml.elements['hostname'].text.to_s
+
+ # Define a per-peer hash to contain our data elements
+ gluster_peers[peer] = {}
+
+ gluster_peers[peer]['uuid'] = peer_xml.elements['uuid'].text.to_s
+ gluster_peers[peer]['connected'] = peer_xml.elements['connected'].text.to_i
+ gluster_peers[peer]['state'] = peer_xml.elements['state'].text.to_i
+ gluster_peers[peer]['status'] = peer_xml.elements['stateStr'].text.to_s
end
- # note the stderr redirection here
- # `gluster volume list` spits to stderr :(
- output = Facter::Util::Resolution.exec("#{binary} volume list 2>&1")
- if output != 'No volumes present in cluster'
- output.split.each do |vol|
- # If a brick has trailing informaion such as (arbiter) remove it
- info = Facter::Util::Resolution.exec("#{binary} volume info #{vol} | sed 's/ (arbiter)//g'")
- vol_status = Regexp.last_match[1] if info =~ %r{^Status: (.+)$}
- bricks = info.scan(%r{^Brick[^:]+: (.+)$}).flatten
- volume_bricks[vol] = bricks
- options = info.scan(%r{^(\w+\.[^:]+: .+)$}).flatten
- volume_options[vol] = options if options
- next unless vol_status == 'Started'
- status = Facter::Util::Resolution.exec("#{binary} volume status #{vol} 2>/dev/null")
- if status =~ %r{^Brick}
- volume_ports[vol] = status.scan(%r{^Brick [^\t]+\t+(\d+)}).flatten.uniq.sort
+
+ # Extract and format the data needed for the legacy peer facts.
+ peer_count = gluster_peers.size
+ peer_list = gluster_peers.keys.join(',')
+
+ # Get our volume information from gluster volume info
+ volume_info_xml = REXML::Document.new(Facter::Util::Resolution.exec("#{binary} volume info --xml"))
+ REXML::XPath.match(volume_info_xml, '/cliOutput/volInfo/volumes/volume').each do |volume_xml|
+ volume = volume_xml.elements['name'].text.to_s
+
+ # Create hash entry for each volume in a structured fact.
+ gluster_volumes[volume] = {}
+
+ vol_status = volume_xml.elements['statusStr'].text.to_s
+ gluster_volumes[volume]['status'] = vol_status
+
+ # Define gluster_volumes[volume]['bricks'] as an array so we can .push() to it.
+ gluster_volumes[volume]['bricks'] = []
+
+ REXML::XPath.match(volume_xml, 'bricks/brick').each do |brick_xml|
+      # We need to loop over the bricks so that we can change the text from REXML::Text to String
+ brick_name = brick_xml.elements['name'].text.to_s
+ gluster_volumes[volume]['bricks'].push(brick_name)
+ end
+
+ options = REXML::XPath.match(volume_xml, 'options/option').map { |option| "#{option.elements['name'].text}: #{option.elements['value'].text}" }
+ if options
+ volume_options[volume] = options
+ gluster_volumes[volume]['features'] = {}
+ gluster_volumes[volume]['options'] = {}
+ # Convert options into key: value pairs for easy retrieval if needed.
+ options.each do |option|
+ option_name, set_value = option.split(': ', 2)
+
+ if option_name =~ %r{^features\.}
+ gluster_volumes[volume]['features'][option_name] = set_value
+ else
+ gluster_volumes[volume]['options'][option_name] = set_value
+ end
end
end
+
+ next unless vol_status == 'Started'
+
+ volume_status_xml = REXML::Document.new(Facter::Util::Resolution.exec("#{binary} volume status #{volume} --xml"))
+ volume_ports[volume] = REXML::XPath.match(volume_status_xml, "/cliOutput/volStatus/volumes/volume/node[starts-with(hostname/text(), '#{Facter.value('hostname')}')]/port/text()")
+
+ # Define gluster_volumes[volume]['ports'] as an array so we can .push() to it.
+ gluster_volumes[volume]['ports'] = []
+
+ volume_ports[volume].each do |port|
+      # port is of type REXML::Text. Convert it to String and then Integer
+ port_number = port.to_s.to_i
+ gluster_volumes[volume]['ports'].push(port_number)
+ end
end
- # Gluster facts don't make sense if the Gluster binary isn't present
+ # Export our facts
Facter.add(:gluster_peer_count) do
setcode do
peer_count
@@ -65,21 +117,35 @@
end
end
- unless volume_bricks.empty?
+  # Create new structured facts containing all peer and volume info.
+ Facter.add(:gluster_peers) do
+ setcode do
+ gluster_peers
+ end
+ end
+
+ Facter.add(:gluster_volumes) do
+ setcode do
+ gluster_volumes
+ end
+ end
+
+ unless gluster_volumes.empty?
Facter.add(:gluster_volume_list) do
setcode do
- volume_bricks.keys.join(',')
+ gluster_volumes.keys.join(',')
end
end
- volume_bricks.each do |vol, bricks|
- Facter.add("gluster_volume_#{vol}_bricks".to_sym) do
+ gluster_volumes.keys.each do |volume|
+ Facter.add("gluster_volume_#{volume}_bricks".to_sym) do
setcode do
- bricks.join(',')
+ gluster_volumes[volume]['bricks'].join(',')
end
end
end
if volume_options
volume_options.each do |vol, opts|
+ # Create flat facts for each volume
Facter.add("gluster_volume_#{vol}_options".to_sym) do
setcode do
opts.join(',')
diff --git a/manifests/volume.pp b/manifests/volume.pp
index 6c0725a2..abc0c336 100644
--- a/manifests/volume.pp
+++ b/manifests/volume.pp
@@ -222,12 +222,9 @@
}
# did the options change?
- $current_options = getvar("gluster_volume_${title}_options")
- if $current_options {
- $_current = sort( split($current_options, ',') )
- } else {
- $_current = []
- }
+ $current_options_hash = pick(fact("gluster_volumes.${title}.options"), {})
+ $_current = sort(join_keys_to_values($current_options_hash, ': '))
+
if $_current != $_options {
#
# either of $current_options or $_options may be empty.
@@ -258,7 +255,7 @@
create_resources( ::gluster::volume::option, $remove )
} else {
$remove_str = join( keys($remove), ', ' )
- notice("NOT REMOVING the following options for volume ${title}:${remove_str}.")
+ notice("NOT REMOVING the following options for volume ${title}: ${remove_str}.")
}
}
if ! empty($to_add) {
diff --git a/spec/unit/lib/facter/gluster_spec.rb b/spec/unit/lib/facter/gluster_spec.rb
new file mode 100644
index 00000000..3d5a825a
--- /dev/null
+++ b/spec/unit/lib/facter/gluster_spec.rb
@@ -0,0 +1,506 @@
+# vim: syntax=ruby tabstop=2 softtabstop=2 shiftwidth=2 fdm=marker
+
+require 'spec_helper'
+
+describe Facter::Util::Fact do
+ before do
+ Facter.clear
+ end
+
+ # {{{ Instance variables
+
+ let(:gluster_binary) { '/usr/sbin/gluster' }
+ let(:gluster_volume_one) { 'volume1' }
+ let(:gluster_brick_path) { "/data/glusterfs/#{gluster_volume_one}/brick1/brick" }
+
+ let(:gluster_peer_one) { 'peer1' } # localhost
+ let(:gluster_peer_one_uuid) { '7d1148a2-f19e-4f18-818f-3396ddf38c30' }
+ let(:gluster_peer_one_port) { 49_153 }
+
+ let(:gluster_peer_two) { 'peer2' }
+ let(:gluster_peer_two_uuid) { 'b8a91151-9d32-43a1-8067-136ec855cb1f' }
+ let(:gluster_peer_two_port) { 49_153 }
+ let(:gluster_peer_three) { 'peer3' }
+ let(:gluster_peer_three_uuid) { '35f53c52-83dc-4100-a1f7-4a7cdeee074d' }
+ let(:gluster_peer_three_port) { 49_152 }
+
+ let(:gluster_peer_shd_port) { 'N/A' }
+
+ # {{{ Gluster peers
+
+ let(:gluster_peer_count) { 2 }
+ let(:gluster_peer_list) { "#{gluster_peer_two},#{gluster_peer_three}" }
+ let(:gluster_peers) do
+ {
+ gluster_peer_two => {
+ 'uuid' => gluster_peer_two_uuid,
+ 'connected' => 1,
+ 'state' => 3,
+ 'status' => 'Peer in Cluster'
+ },
+ gluster_peer_three => {
+ 'uuid' => gluster_peer_three_uuid,
+ 'connected' => 1,
+ 'state' => 3,
+ 'status' => 'Peer in Cluster'
+ }
+ }
+ end
+
+ # }}}
+ # {{{ Gluster volumes
+
+ let(:gluster_volume_list) { gluster_volume_one.to_s }
+
+ let(:gluster_volumes) do
+ {
+ gluster_volume_one => {
+ 'status' => 'Started',
+ 'bricks' => [
+ "#{gluster_peer_one}:#{gluster_brick_path}",
+ "#{gluster_peer_two}:#{gluster_brick_path}",
+ "#{gluster_peer_three}:#{gluster_brick_path}"
+ ],
+ 'features' => {
+ 'features.cache-invalidation' => 'true'
+ },
+ 'options' => {
+ 'nfs.disable' => 'on',
+ 'performance.readdir-ahead' => 'on',
+ 'auth.allow' => '10.10.0.21,10.10.0.22,10.10.0.23'
+ },
+ 'ports' => [
+ gluster_peer_one_port,
+ gluster_peer_two_port,
+ gluster_peer_three_port,
+ gluster_peer_shd_port.to_i, # Self-heal Daemon
+ gluster_peer_shd_port.to_i, # Self-heal Daemon
+ gluster_peer_shd_port.to_i # Self-heal Daemon
+ ]
+ }
+ }
+ end
+
+ # }}}
+ # {{{ Volume options
+
+ let(:gluster_volume_options) do
+ {
+ gluster_volume_one => [
+ "features.cache-invalidation: #{gluster_volumes[gluster_volume_one]['features']['features.cache-invalidation']}",
+ "nfs.disable: #{gluster_volumes[gluster_volume_one]['options']['nfs.disable']}",
+ "performance.readdir-ahead: #{gluster_volumes[gluster_volume_one]['options']['performance.readdir-ahead']}",
+ "auth.allow: #{gluster_volumes[gluster_volume_one]['options']['auth.allow']}"
+ ]
+ }
+ end
+
+ # }}}
+ # {{{ Volume ports
+
+ let(:gluster_volume_ports) do
+ {
+ gluster_volume_one => {
+ 'ports' => [
+ gluster_peer_one_port,
+ gluster_peer_two_port,
+ gluster_peer_three_port,
+ gluster_peer_shd_port, # Self-heal Daemon
+ gluster_peer_shd_port, # Self-heal Daemon
+ gluster_peer_shd_port # Self-heal Daemon
+ ]
+ }
+ }
+ end
+
+ # }}}
+
+ # {{{ Xml
+
+ # {{{ No peer
+
+ let(:gluster_no_peer) do
+ '
+
+ 0
+ 0
+
+
+ '
+ end
+
+ # }}}
+ # {{{ No volume
+
+ let(:gluster_no_volume) do
+ '
+
+ 0
+ 0
+
+
+
+ 0
+
+
+ '
+ end
+
+ # }}}
+ # {{{ Peer status
+
+ let(:gluster_peer_status_xml) do
+ "
+
+ 0
+ 0
+
+
+
+ #{gluster_peers[gluster_peer_two]['uuid']}
+ #{gluster_peer_two}
+
+ #{gluster_peer_two}
+
+ #{gluster_peers[gluster_peer_two]['connected']}
+ #{gluster_peers[gluster_peer_two]['state']}
+ #{gluster_peers[gluster_peer_two]['status']}
+
+
+ #{gluster_peers[gluster_peer_three]['uuid']}
+ #{gluster_peer_three}
+
+ #{gluster_peer_three}
+
+ #{gluster_peers[gluster_peer_three]['connected']}
+ #{gluster_peers[gluster_peer_three]['state']}
+ #{gluster_peers[gluster_peer_three]['status']}
+
+
+ "
+ end
+
+ # }}}
+ # {{{ Volume info
+
+ let(:gluster_volume_info_xml) do
+ "
+
+ 0
+ 0
+
+
+
+
+ #{gluster_volume_one}
+ 208c58eb-44da-467c-b73d-3e52a1d9d544
+ 1
+ #{gluster_volumes[gluster_volume_one]['status']}
+ 0
+ 3
+ 3
+ 1
+ 1
+ 0
+ 3
+ 1
+ 4
+ Disperse
+ 0
+
+
+ #{gluster_peer_one}:#{gluster_brick_path}#{gluster_peer_one}:#{gluster_brick_path}#{gluster_peer_one_uuid}0
+ #{gluster_peer_two}:#{gluster_brick_path}#{gluster_peer_two}:#{gluster_brick_path}#{gluster_peer_two_uuid}0
+ #{gluster_peer_three}:#{gluster_brick_path}#{gluster_peer_three}:#{gluster_brick_path}#{gluster_peer_three_uuid}0
+
+ 4
+
+
+
+
+
+
+
+ 1
+
+
+ "
+ end
+
+ # }}}
+ # {{{ Volume status
+
+ let(:gluster_volume_one_status_xml) do
+ "
+
+ 0
+ 0
+
+
+
+
+ #{gluster_volume_one}
+ 6
+
+ #{gluster_peer_one}
+ #{gluster_brick_path}
+ #{gluster_peer_one_uuid}
+ 1
+ #{gluster_peer_one_port}
+
+ #{gluster_peer_one_port}
+ N/A
+
+ 1773
+
+
+ #{gluster_peer_two}
+ #{gluster_brick_path}
+ #{gluster_peer_two_uuid}
+ 1
+ #{gluster_peer_two_port}
+
+ #{gluster_peer_two_port}
+ N/A
+
+ 1732
+
+
+ #{gluster_peer_three}
+ #{gluster_brick_path}
+ #{gluster_peer_three_uuid}
+ 1
+ #{gluster_peer_three_port}
+
+ #{gluster_peer_three_port}
+ N/A
+
+ 2175
+
+
+ Self-heal Daemon
+ localhost
+ #{gluster_peer_one_uuid}
+ 1
+ #{gluster_peer_shd_port}
+
+ #{gluster_peer_shd_port}
+ N/A
+
+ 12189
+
+
+ Self-heal Daemon
+ #{gluster_peer_three}
+ #{gluster_peer_three_uuid}
+ 1
+ #{gluster_peer_shd_port}
+
+ #{gluster_peer_shd_port}
+ N/A
+
+ 22521
+
+
+ Self-heal Daemon
+ #{gluster_peer_two}
+ #{gluster_peer_two_uuid}
+ 1
+ #{gluster_peer_shd_port}
+
+ #{gluster_peer_shd_port}
+ N/A
+
+ 31403
+
+
+
+
+
+ "
+ end
+
+ # }}}
+
+ # }}}
+
+ # }}}
+
+ # {{{ Gluster not running
+
+ context 'gluster not running' do
+ before do
+ allow(Facter).to receive(:value) # Stub all other calls
+ allow(Facter).to receive(:value).with('gluster_custom_binary').and_return(gluster_binary)
+ allow(File).to receive(:executable?).with(gluster_binary).and_return(true)
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} peer status --xml") {
+ 'Connection failed. Please check if gluster daemon is operational.'
+ }
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} volume info --xml") {
+ 'Connection failed. Please check if gluster daemon is operational.'
+ }
+ end
+ it 'detect gluster binary' do
+ expect(Facter.fact(:gluster_binary).value).to eq(gluster_binary)
+ end
+ it 'null peer count' do
+ expect(Facter.fact(:gluster_peer_count).value).to eq(0)
+ end
+ it 'empty peer list' do
+ expect(Facter.fact(:gluster_peer_list).value).to eq('')
+ end
+ it 'empty peers hash' do
+ expect(Facter.fact(:gluster_peers).value).to eq({})
+ end
+ it 'empty volumes hash' do
+ expect(Facter.fact(:gluster_volumes).value).to eq({})
+ end
+ it 'nil gluster_volume_list' do
+ expect(Facter.fact(:gluster_volume_list)).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_bricks' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_bricks")).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_options' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_options")).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_ports' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_ports")).to eq(nil)
+ end
+ end
+
+ # }}}
+ # {{{ No peers and no volumes
+
+ context 'no peers and no volumes' do
+ before do
+ allow(Facter).to receive(:value) # Stub all other calls
+ allow(Facter).to receive(:value).with('gluster_custom_binary').and_return(gluster_binary)
+ allow(File).to receive(:executable?).with(gluster_binary).and_return(true)
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} peer status --xml") { gluster_no_peer }
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} volume info --xml") { gluster_no_volume }
+ end
+ it 'detect gluster binary' do
+ expect(Facter.fact(:gluster_binary).value).to eq(gluster_binary)
+ end
+ it 'null peer count' do
+ expect(Facter.fact(:gluster_peer_count).value).to eq(0)
+ end
+ it 'empty peer list' do
+ expect(Facter.fact(:gluster_peer_list).value).to eq('')
+ end
+ it 'empty peers hash' do
+ expect(Facter.fact(:gluster_peers).value).to eq({})
+ end
+ it 'empty volumes hash' do
+ expect(Facter.fact(:gluster_volumes).value).to eq({})
+ end
+ it 'nil gluster_volume_list' do
+ expect(Facter.fact(:gluster_volume_list)).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_bricks' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_bricks")).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_options' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_options")).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_ports' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_ports")).to eq(nil)
+ end
+ end
+
+ # }}}
+ # {{{ Two peers and no volumes
+
+ context 'two peers and no volumes' do
+ before do
+ allow(Facter).to receive(:value) # Stub all other calls
+ allow(Facter).to receive(:value).with('gluster_custom_binary').and_return(gluster_binary)
+ allow(File).to receive(:executable?).with(gluster_binary).and_return(true)
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} peer status --xml") { gluster_peer_status_xml }
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} volume info --xml") { gluster_no_volume }
+ end
+ it 'detect gluster binary' do
+ expect(Facter.fact(:gluster_binary).value).to eq(gluster_binary)
+ end
+ it 'check gluster_peer_count integer' do
+ expect(Facter.fact(:gluster_peer_count).value).to eq(gluster_peer_count)
+ end
+ it 'check gluster_peer_list string' do
+ expect(Facter.fact(:gluster_peer_list).value).to eq(gluster_peer_list)
+ end
+ it 'check gluster_peers hash' do
+ expect(Facter.fact(:gluster_peers).value).to eq(gluster_peers)
+ end
+ it 'empty volumes hash' do
+ expect(Facter.fact(:gluster_volumes).value).to eq({})
+ end
+ it 'nil gluster_volume_list' do
+ expect(Facter.fact(:gluster_volume_list)).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_bricks' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_bricks")).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_options' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_options")).to eq(nil)
+ end
+ it 'nil gluster_volume_volume_ports' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_ports")).to eq(nil)
+ end
+ end
+
+ # }}}
+ # {{{ Two peers and one volume
+
+ context 'two peers and one volumes' do
+ before do
+ allow(Facter).to receive(:value) # Stub all other calls
+ allow(Facter).to receive(:value).with('gluster_custom_binary').and_return(gluster_binary)
+ allow(File).to receive(:executable?).with(gluster_binary).and_return(true)
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} peer status --xml") { gluster_peer_status_xml }
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} volume info --xml") { gluster_volume_info_xml }
+ allow(Facter::Util::Resolution).to receive(:exec).with("#{gluster_binary} volume status #{gluster_volume_one} --xml") { gluster_volume_one_status_xml }
+ end
+ it 'detect gluster binary' do
+ expect(Facter.fact(:gluster_binary).value).to eq(gluster_binary)
+ end
+ it 'check gluster_peer_count integer' do
+ expect(Facter.fact(:gluster_peer_count).value).to eq(gluster_peer_count)
+ end
+ it 'check gluster_peer_list string' do
+ expect(Facter.fact(:gluster_peer_list).value).to eq(gluster_peer_list)
+ end
+ it 'check gluster_peers hash' do
+ expect(Facter.fact(:gluster_peers).value).to eq(gluster_peers)
+ end
+ it 'check gluster_volumes hash' do
+ expect(Facter.fact(:gluster_volumes).value).to eq(gluster_volumes)
+ end
+ it 'check gluster_volume_list string' do
+ expect(Facter.fact(:gluster_volume_list).value).to eq(gluster_volume_list)
+ end
+ it 'check gluster_volume_volume_bricks (comma separated string)' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_bricks").value).to eq(gluster_volumes[gluster_volume_one]['bricks'].join(','))
+ end
+ it 'check gluster_volume_volume_options (comma separated string)' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_options").value).to eq(gluster_volume_options[gluster_volume_one].join(','))
+ end
+ it 'check gluster_volume_volume_ports (comma separated string)' do
+ expect(Facter.fact(:"gluster_volume_#{gluster_volume_one}_ports").value).to eq(gluster_volume_ports[gluster_volume_one]['ports'].join(','))
+ end
+ end
+
+ # }}}
+end