pcs-0.9.26-2.mga3.src.rpm

diff -uNr pcs-0.9.26/Makefile pcs-0.9.28/Makefile
--- pcs-0.9.26/Makefile	2012-09-24 12:50:52.000000000 -0500
+++ pcs-0.9.28/Makefile	2012-10-22 16:48:38.000000000 -0500
@@ -1,12 +1,20 @@
 ifndef PYTHON_SITELIB
   PYTHON_SITELIB=`python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"`
 endif
+ifndef PREFIX
+  PREFIX=/usr
+endif
 
 install:
-	python setup.py install --prefix ${DESTDIR}/usr
-	mkdir -p ${DESTDIR}/usr/sbin/
-	chmod 755 ${DESTDIR}/${PYTHON_SITELIB}/pcs/pcs.py
-	ln -s ${PYTHON_SITELIB}/pcs/pcs.py ${DESTDIR}/usr/sbin/pcs
+	python setup.py install --prefix ${DESTDIR}${PREFIX}
+	mkdir -p ${DESTDIR}${PREFIX}/sbin/
+	chmod 755 ${DESTDIR}${PYTHON_SITELIB}/pcs/pcs.py
+	ln -s ${PYTHON_SITELIB}/pcs/pcs.py ${DESTDIR}${PREFIX}/sbin/pcs
+
+uninstall:
+	rm -f ${DESTDIR}${PREFIX}/sbin/pcs
+	rm -rf ${DESTDIR}${PYTHON_SITELIB}/pcs*
 
 tarball:
 	python setup.py sdist
+
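
The PREFIX default added above makes the install and uninstall targets relocatable. A hedged example of how a packager might drive them, in the same style as the README below (the staging path is illustrative):

# make install DESTDIR=/tmp/pcs-stage PREFIX=/usr
# make uninstall DESTDIR=/tmp/pcs-stage PREFIX=/usr
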
diff -uNr pcs-0.9.26/PKG-INFO pcs-0.9.28/PKG-INFO
--- pcs-0.9.26/PKG-INFO	2012-09-25 14:34:38.000000000 -0500
+++ pcs-0.9.28/PKG-INFO	2012-10-31 17:35:39.000000000 -0500
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: pcs
-Version: 0.9.26
+Version: 0.9.28
 Summary: Pacemaker Configuration System
 Home-page: http://github.com/feist/pcs
 Author: Chris Feist
diff -uNr pcs-0.9.26/README pcs-0.9.28/README
--- pcs-0.9.26/README	2012-01-23 11:37:11.000000000 -0600
+++ pcs-0.9.28/README	2012-09-28 14:22:13.000000000 -0500
@@ -1 +1,25 @@
 PCS - Pacemaker/Corosync configuration system
+
+Quick install
+
+# tar -xzvf pcs-0.9.28.tar.gz
+# cd pcs-0.9.28
+# make install
+
+This will install pcs into /usr/sbin/pcs
+
+To create a cluster, run the following command on all nodes (replacing node1, node2, node3 with the list of nodes in the cluster):
+# pcs cluster setup --local cluster_name node1 node2 node3
+
+Then run the following command on all nodes:
+# pcs cluster start
+
+After a few moments the cluster should start up, and you can check its status:
+# pcs status
+
+After this you can add resources and stonith agents:
+# pcs resource help
+and
+# pcs stonith help
+
+If you have any questions or concerns, please feel free to email cfeist@redhat.com or open a GitHub issue on the pcs project.
diff -uNr pcs-0.9.26/pcs/cluster.py pcs-0.9.28/pcs/cluster.py
--- pcs-0.9.26/pcs/cluster.py	2012-09-25 13:45:08.000000000 -0500
+++ pcs-0.9.28/pcs/cluster.py	2012-10-31 17:33:57.000000000 -0500
@@ -10,11 +10,12 @@
 import prop
 import resource
 import constraint
+import settings
 
 pcs_dir = os.path.dirname(os.path.realpath(__file__))
 COROSYNC_CONFIG_TEMPLATE = pcs_dir + "/corosync.conf.template"
 COROSYNC_CONFIG_FEDORA_TEMPLATE = pcs_dir + "/corosync.conf.fedora.template"
-COROSYNC_CONFIG_FILE = "/etc/corosync/corosync.conf"
+COROSYNC_CONFIG_FILE = settings.corosync_conf_file
 
 def cluster_cmd(argv):
     if len(argv) == 0:
@@ -162,14 +163,14 @@
             print node + ": Offline"
     
 def corosync_setup(argv,returnConfig=False):
-    fedora_config = True
+    fedora_config = not utils.is_rhel6()
     if len(argv) < 2:
         usage.cluster()
         exit(1)
-    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options:
+    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options and fedora_config:
         sync_start(argv)
         return
-    elif not returnConfig and not "--local" in utils.pcs_options:
+    elif not returnConfig and not "--local" in utils.pcs_options and fedora_config:
         sync(argv)
         return
     else:
@@ -178,13 +179,10 @@
 
     if fedora_config == True:
         f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
-    else:
-        f = open(COROSYNC_CONFIG_TEMPLATE, 'r')
 
-    corosync_config = f.read()
-    f.close()
+        corosync_config = f.read()
+        f.close()
 
-    if fedora_config == True:
         i = 1
         new_nodes_section = ""
         for node in nodes:
@@ -196,11 +194,23 @@
 
         corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
         corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
+        if returnConfig:
+            return corosync_config
 
-    if returnConfig:
-        return corosync_config
+        utils.setCorosyncConf(corosync_config)
+    else:
+        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--createcluster", cluster_name])
+        if retval != 0:
+            print output
+            print "Error creating cluster:", cluster_name
+            sys.exit(1)
+        for node in nodes:
+            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--addnode", node])
+            if retval != 0:
+                print output
+                print "Error adding node:", node
+                sys.exit(1)
 
-    utils.setCorosyncConf(corosync_config)
     if "--start" in utils.pcs_options:
         start_cluster([])
 
@@ -221,15 +231,22 @@
             utils.startCluster(node)
             return
 
-    print "Starting Cluster...",
-    output, retval = utils.run(["systemctl", "start","corosync.service"])
-    print output,
-    if retval != 0:
-        print "Error: unable to start corosync"
-        sys.exit(1)
-    output, retval = utils.run(["systemctl", "start", "pacemaker.service"])
-    print output,
+    print "Starting Cluster..."
+    if utils.is_rhel6():
+        output, retval = utils.run(["service", "cman","start"])
+        if retval != 0:
+            print output
+            print "Error: unable to start cman"
+            sys.exit(1)
+    else:
+        output, retval = utils.run(["service", "corosync","start"])
+        if retval != 0:
+            print output
+            print "Error: unable to start corosync"
+            sys.exit(1)
+    output, retval = utils.run(["service", "pacemaker", "start"])
     if retval != 0:
+        print output
         print "Error: unable to start pacemaker"
         sys.exit(1)
 
@@ -257,8 +274,7 @@
             utils.enableCluster(node)
             return
 
-    utils.run(["systemctl", "enable", "corosync.service"])
-    utils.run(["systemctl", "enable", "pacemaker.service"])
+    utils.enableServices()
 
 def disable_cluster(argv):
     if len(argv) > 0:
@@ -266,8 +282,7 @@
             utils.disableCluster(node)
             return
 
-    utils.run(["systemctl", "disable", "corosync.service"])
-    utils.run(["systemctl", "disable", "pacemaker.service"])
+    utils.disableServices()
 
 def enable_cluster_all():
     for node in utils.getNodesFromCorosyncConf():
@@ -283,17 +298,24 @@
             utils.stopCluster(node)
             return
 
-    print "Stopping Cluster...",
-    output, retval = utils.run(["systemctl", "stop","pacemaker.service"])
-    print output,
+    print "Stopping Cluster..."
+    output, retval = utils.run(["service", "pacemaker","stop"])
     if retval != 0:
+        print output,
         print "Error: unable to stop pacemaker"
         sys.exit(1)
-    output, retval = utils.run(["systemctl", "stop","corosync.service"])
-    print output,
-    if retval != 0:
-        print "Error: unable to stop corosync"
-        sys.exit(1)
+    if utils.is_rhel6():
+        output, retval = utils.run(["service", "cman","stop"])
+        if retval != 0:
+            print output,
+            print "Error: unable to stop cman"
+            sys.exit(1)
+    else:
+        output, retval = utils.run(["service", "corosync","stop"])
+        if retval != 0:
+            print output,
+            print "Error: unable to stop corosync"
+            sys.exit(1)
 
 def force_stop_cluster(argv):
     daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync"]
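
The start and stop rework above follows one pattern throughout: pick the stack by platform (cman on RHEL 6, plain corosync elsewhere), drive it through the service init scripts, and abort on any non-zero exit. A minimal standalone sketch of that pattern, with run() standing in for utils.run (not the pcs code itself):

    import subprocess, sys

    def run(args):
        # Mimic utils.run: capture combined output plus the exit status.
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        output = p.communicate()[0]
        return output, p.returncode

    def start_stack(rhel6):
        # RHEL 6 boots the cman stack; other platforms use corosync.
        for svc in (["cman"] if rhel6 else ["corosync"]) + ["pacemaker"]:
            output, retval = run(["service", svc, "start"])
            if retval != 0:
                print output
                print "Error: unable to start " + svc
                sys.exit(1)
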
diff -uNr pcs-0.9.26/pcs/constraint.py pcs-0.9.28/pcs/constraint.py
--- pcs-0.9.26/pcs/constraint.py	2012-09-12 17:04:45.000000000 -0500
+++ pcs-0.9.28/pcs/constraint.py	2012-10-18 17:44:17.000000000 -0500
@@ -28,7 +28,6 @@
             location_prefer([sub_cmd2] + argv)
         else:
             usage.constraint()
-            print argv
             sys.exit(1)
     elif (sub_cmd == "order"):
         if (len(argv) == 0):
@@ -74,7 +73,6 @@
     elif (sub_cmd == "ref"):
         constraint_ref(argv)
     else:
-        print sub_cmd
         usage.constraint()
         sys.exit(1)
 
@@ -163,6 +161,14 @@
     resource1 = argv.pop(0)
     resource2 = argv.pop(0)
 
+    if not utils.does_resource_exist(resource1):
+        print "Error: Resource '" + resource1 + "' does not exist"
+        sys.exit(1)
+
+    if not utils.does_resource_exist(resource2):
+        print "Error: Resource '" + resource2 + "' does not exist"
+        sys.exit(1)
+
     score,nv_pairs = parse_score_options(argv)
 
     (dom,constraintsElement) = getCurrentConstraints()
@@ -290,6 +296,15 @@
 
     resource1 = argv.pop(0)
     resource2 = argv.pop(0)
+
+    if not utils.does_resource_exist(resource1):
+        print "Error: Resource '" + resource1 + "' does not exist"
+        sys.exit(1)
+
+    if not utils.does_resource_exist(resource2):
+        print "Error: Resource '" + resource2 + "' does not exist"
+        sys.exit(1)
+
     sym = "true" if (len(argv) == 0 or argv[0] != "nonsymmetrical") else "false"
 
     order_options = []
@@ -509,6 +524,10 @@
         resource_name = argv.pop(0)
         node = argv.pop(0)
         score = argv.pop(0)
+        # If resource doesn't exist, we error out
+        if not utils.does_resource_exist(resource_name):
+            print "Error: Resource '" + resource_name + "' does not exist"
+            sys.exit(1)
 
     # Verify current constraint doesn't already exist
     # If it does we replace it with the new constraint
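
The new existence checks all call utils.does_resource_exist, which (per the utils.py hunks further down) boils down to an xpath query against the live CIB. Roughly, and assuming cibadmin is on the path (a sketch, not the exact pcs call chain):

    import subprocess

    def does_resource_exist(resource_id):
        # cibadmin -Q --xpath exits non-zero when the query matches
        # nothing, so the return code alone answers the question.
        args = ["cibadmin", "-Q", "--xpath",
                "//primitive[@id='" + resource_id + "']"]
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p.communicate()
        return p.returncode == 0
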
diff -uNr pcs-0.9.26/pcs/resource.py pcs-0.9.28/pcs/resource.py
--- pcs-0.9.26/pcs/resource.py	2012-09-24 17:55:46.000000000 -0500
+++ pcs-0.9.28/pcs/resource.py	2012-10-18 15:19:15.000000000 -0500
@@ -248,6 +248,7 @@
     if retval != 0:
         print "Error: unable to get current list of providers"
         print output
+        sys.exit(1)
     print output.strip()
 
 def resource_agents(argv):
diff -uNr pcs-0.9.26/pcs/settings.py pcs-0.9.28/pcs/settings.py
--- pcs-0.9.26/pcs/settings.py	1969-12-31 18:00:00.000000000 -0600
+++ pcs-0.9.28/pcs/settings.py	2012-10-17 15:05:19.000000000 -0500
@@ -0,0 +1,4 @@
+pacemaker_binaries = "/usr/sbin/"
+corosync_binaries = "/usr/sbin/"
+corosync_conf_file = "/etc/corosync/corosync.conf"
+fence_agent_binaries = "/usr/sbin/"
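
Collecting these paths into one module means a downstream distribution only has to patch settings.py to relocate everything at once; e.g. a hypothetical override for a distro that keeps its fence agents elsewhere:

    # settings.py as a hypothetical downstream might ship it
    pacemaker_binaries = "/usr/sbin/"
    corosync_binaries = "/usr/sbin/"
    corosync_conf_file = "/etc/corosync/corosync.conf"
    fence_agent_binaries = "/usr/libexec/fence/"   # illustrative path
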
diff -uNr pcs-0.9.26/pcs/status.py pcs-0.9.28/pcs/status.py
--- pcs-0.9.26/pcs/status.py	2012-09-25 13:44:45.000000000 -0500
+++ pcs-0.9.28/pcs/status.py	2012-10-17 14:26:32.000000000 -0500
@@ -38,7 +38,7 @@
         sys.exit(1)
 
 def full_status():
-    (output, retval) = utils.run(["/usr/sbin/crm_mon", "-1", "-r"])
+    (output, retval) = utils.run(["crm_mon", "-1", "-r"])
 
     if (retval != 0):
         print "Error running crm_mon, is pacemaker running?"
@@ -151,7 +151,7 @@
         print "- " + resource.getAttribute("role") + " " + node_line
 
 def cluster_status(argv):
-    (output, retval) = utils.run(["/usr/sbin/crm_mon", "-1", "-r"])
+    (output, retval) = utils.run(["crm_mon", "-1", "-r"])
 
     if (retval != 0):
         print "Error running crm_mon, is pacemaker running?"
@@ -171,7 +171,7 @@
             print "",line
 
 def corosync_status():
-    (output, retval) = utils.run(["/sbin/corosync-quorumtool", "-l"])
+    (output, retval) = utils.run(["corosync-quorumtool", "-l"])
     if retval != 0:
         print "Error: Corosync not running"
         sys.exit(1)
@@ -179,7 +179,7 @@
         print output,
 
 def xml_status():
-    (output, retval) = utils.run(["/usr/sbin/crm_mon", "-1", "-r", "-X"])
+    (output, retval) = utils.run(["crm_mon", "-1", "-r", "-X"])
 
     if (retval != 0):
         print "Error running crm_mon, is pacemaker running?"
diff -uNr pcs-0.9.26/pcs/stonith.py pcs-0.9.28/pcs/stonith.py
--- pcs-0.9.26/pcs/stonith.py	2012-09-04 16:41:58.000000000 -0500
+++ pcs-0.9.28/pcs/stonith.py	2012-10-17 14:52:32.000000000 -0500
@@ -49,8 +49,12 @@
         stn_id = argv.pop(0)
         resource.resource_update(stn_id,argv)
     elif (sub_cmd == "delete"):
-        stn_id = argv.pop(0)
-        resource_remove(stn_id)
+        if len(argv) > 0:
+            stn_id = argv.pop(0)
+            resource.resource_remove(stn_id)
+        else:
+            usage.stonith()
+            sys.exit(1)
     elif (sub_cmd == "show"):
         stonith_show(argv)
     else:
@@ -90,10 +94,10 @@
 
     bad_fence_devices = ["kdump_send", "legacy", "na", "nss_wrapper",
             "pcmk", "vmware_helper", "ack_manual", "virtd"]
-    fence_devices = sorted(glob.glob("/usr/sbin/fence_*"))
+    fence_devices = sorted(glob.glob(utils.fence_bin + "fence_*"))
     for bfd in bad_fence_devices:
         try:
-            fence_devices.remove("/usr/sbin/fence_"+bfd)
+            fence_devices.remove(utils.fence_bin + "fence_"+bfd)
         except ValueError:
             continue
 
@@ -115,7 +119,7 @@
         print fd + sd
 
 def stonith_list_options(stonith_agent):
-    metadata = get_metadata("/usr/sbin/" + stonith_agent)
+    metadata = get_metadata(utils.fence_bin + stonith_agent)
     if not metadata:
         print "Unable to get metadata for %s" % stonith_agent
         sys.exit(1)
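
With both the agent glob and the metadata lookup now keyed off utils.fence_bin, a quick way to preview what the listing will find on a given box (a sketch assuming the settings default of /usr/sbin/):

    import glob, os.path

    fence_bin = "/usr/sbin/"  # settings.fence_agent_binaries
    # Reduce full paths to bare agent names, e.g. "fence_ipmilan".
    agents = sorted(os.path.basename(p)
                    for p in glob.glob(fence_bin + "fence_*"))
    print "\n".join(agents)
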
diff -uNr pcs-0.9.26/pcs/usage.py pcs-0.9.28/pcs/usage.py
--- pcs-0.9.26/pcs/usage.py	2012-09-25 14:31:03.000000000 -0500
+++ pcs-0.9.28/pcs/usage.py	2012-10-31 17:12:10.000000000 -0500
@@ -126,6 +126,16 @@
 Configure cluster for use with pacemaker
 
 Commands:
+    auth [node] [...] [-u username] [-p password]
+        Authenticate pcs to pcsd on nodes specified, or on all nodes
+        configured in corosync.conf if no nodes are specified (authorization
+        tokens are stored in ~/.pcs/token)
+
+    setup [--start] [--local] <cluster name> <node1 name> [node2] [...]
+        Configure corosync and sync configuration out to listed nodes
+        --local will only perform changes on the local node
+        --start will also start the cluster on the specified nodes
+
     start [--all] [node] [...]
         Start corosync & pacemaker on specified node(s), if a node is not
         specified then corosync & pacemaker are started on the local node.
@@ -169,11 +179,6 @@
         Get current status of pcsd on nodes specified, or on all nodes
         configured in corosync.conf if no nodes are specified
 
-    auth [node] [...] [-u username] [-p password]
-        Authenticate pcs to pcsd on nodes specified, or on all nodes
-        configured in corosync.conf if no nodes are specified (authorization
-        tokens are stored in ~/.pcs/token)
-
     token <node>
         Get authorization token for specified node
 
@@ -181,11 +186,6 @@
         Sync corosync configuration to all nodes found from current
         corosync.conf file
 
-    setup [--start] [--local] <cluster name> <node1 name> [node2] [...]
-        Configure corosync and sync configuration out to listed nodes
-        --local will only perform changes on the local node
-        --start will also start the cluster on the specified nodes
-
     cib [filename]
         Get the raw xml from the CIB (Cluster Information Base).  If a
         filename is provided, we save the cib to that file, otherwise the cib
diff -uNr pcs-0.9.26/pcs/utils.py pcs-0.9.28/pcs/utils.py
--- pcs-0.9.26/pcs/utils.py	2012-09-25 13:42:05.000000000 -0500
+++ pcs-0.9.28/pcs/utils.py	2012-10-31 17:22:56.000000000 -0500
@@ -7,12 +7,14 @@
 import xml.etree.ElementTree as ET
 import re
 import json
+import settings
 
 
 # usefile & filename variables are set in pcs module
 usefile = False
 filename = ""
 pcs_options = {}
+fence_bin = settings.fence_agent_binaries
 
 # Check status of node
 def checkStatus(node):
@@ -114,7 +116,7 @@
 # 2 = No response,
 # 3 = Auth Error
 def sendHTTPRequest(host, request, data = None, printResult = True):
-    url = 'http://' + host + ':2222/' + request
+    url = 'https://' + host + ':2224/' + request
     opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
     tokens = readTokens()
     if host in tokens:
@@ -161,14 +163,14 @@
     ret_nodes.sort()
     return ret_nodes
 
-def getCorosyncConf(conf='/etc/corosync/corosync.conf'):
+def getCorosyncConf(conf=settings.corosync_conf_file):
     try:
         out = open(conf).read()
     except IOError:
         return ""
     return out
 
-def setCorosyncConf(corosync_config, conf_file='/etc/corosync/corosync.conf'):
+def setCorosyncConf(corosync_config, conf_file=settings.corosync_conf_file):
     try:
         f = open(conf_file,'w')
         f.write(corosync_config)
@@ -178,7 +180,7 @@
         exit(1)
 
 def getCorosyncActiveNodes():
-    args = ["/sbin/corosync-cmapctl"]
+    args = ["corosync-cmapctl"]
     nodes = []
     output,retval = run(args)
     if retval != 0:
@@ -250,9 +252,9 @@
         new_corosync_conf += corosync_conf[count:]
         setCorosyncConf(new_corosync_conf)
 
-        run(["/sbin/corosync-cmapctl", "-s", "nodelist.node." +
+        run(["corosync-cmapctl", "-s", "nodelist.node." +
             str(new_nodeid - 1) + ".nodeid", "u32", str(new_nodeid)])
-        run(["/sbin/corosync-cmapctl", "-s", "nodelist.node." +
+        run(["corosync-cmapctl", "-s", "nodelist.node." +
             str(new_nodeid - 1) + ".ring0_addr", "str", node])
     else:
         print "Unable to find nodelist in corosync.conf"
@@ -286,7 +288,7 @@
                 new_corosync_conf = "\n".join(corosync_conf[0:x] + corosync_conf[x+4:])
                 print new_corosync_conf
                 setCorosyncConf(new_corosync_conf)
-                run(["/sbin/corosync-cmapctl", "-D", "nodelist.node." +
+                run(["corosync-cmapctl", "-D", "nodelist.node." +
                     str(nodeid) + "."])
 
     if error:
@@ -317,6 +319,12 @@
                 print "Unable to write to file: " + filename
                 sys.exit(1)
 
+    command = args[0]
+    if command[0:3] == "crm" or command == "cibadmin":
+        args[0] = settings.pacemaker_binaries + command
+    if command[0:8] == "corosync":
+        args[0] = settings.corosync_binaries + command
+        
     try:
         if ignore_stderr:
             p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env = env_var)
@@ -339,6 +347,9 @@
         return False
     return True
 
+def does_resource_exist(resource_id):
+    return does_exist("//primitive[@id='"+resource_id+"']")
+
 # Return matches from the CIB with the xpath_query
 def get_cib_xpath(xpath_query):
     args = ["cibadmin", "-Q", "--xpath", xpath_query]
@@ -478,7 +489,7 @@
 
 # Returns an xml dom containing the current status of the cluster
 def getClusterState():
-    (output, retval) = run(["/usr/sbin/crm_mon", "-1", "-X","-r"])
+    (output, retval) = run(["crm_mon", "-1", "-X","-r"])
     if (retval != 0):
         print "Error running crm_mon, is pacemaker running?"
         sys.exit(1)
@@ -500,3 +511,36 @@
     f = open(filename, 'w')
     f.write(empty_xml)
     f.close()
+
+def is_systemctl():
+    if os.path.exists('/usr/bin/systemctl'):
+        return True
+    else:
+        return False
+
+def is_rhel6():
+    try:
+        issue = open('/etc/issue').read()
+    except IOError as e:
+        return False
+
+    if re.search(r'Red Hat Enterprise Linux Server release 6\.', issue):
+        return True
+    else:
+        return False
+
+def enableServices():
+    if is_systemctl():
+        run(["systemctl", "enable", "corosync.service"])
+        run(["systemctl", "enable", "pacemaker.service"])
+    else:
+        run(["chkconfig", "corosync", "on"])
+        run(["chkconfig", "pacemaker", "on"])
+
+def disableServices():
+    if is_systemctl():
+        run(["systemctl", "disable", "corosync.service"])
+        run(["systemctl", "disable", "pacemaker.service"])
+    else:
+        run(["chkconfig", "corosync", "off"])
+        run(["chkconfig", "pacemaker", "off"])
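
The run() change earlier in this file is what lets every caller drop its hard-coded /usr/sbin and /sbin paths: argv[0] is rewritten from the settings module just before the subprocess is spawned. A condensed sketch of that resolution step (the two directories mirror settings.py):

    def resolve(args):
        # Prepend the configured directory for pacemaker tools (crm_*,
        # cibadmin) and for corosync tools, as utils.run now does.
        pacemaker_binaries = "/usr/sbin/"
        corosync_binaries = "/usr/sbin/"
        command = args[0]
        if command[0:3] == "crm" or command == "cibadmin":
            args[0] = pacemaker_binaries + command
        if command[0:8] == "corosync":
            args[0] = corosync_binaries + command
        return args

    # e.g. resolve(["crm_mon", "-1", "-r"]) -> ["/usr/sbin/crm_mon", "-1", "-r"]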