* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2007-02-20 23:07 rmccabe
From: rmccabe @ 2007-02-20 23:07 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-02-20 23:07:00

Modified files:
	luci/cluster   : form-macros index_html resource-form-macros 
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	- Only allow resources using the new application resource agents to be added and configured if the 'rgmanager-app-agents' package is installed.
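
	For readers skimming the patch, the intended flow of the new check is, roughly: index_html stores the ricci agent in the session, getClusterInfo() calls has_rgmanager_app_agents() and publishes the result in the map the templates see as 'clusterinfo', and resource-form-macros only offers the new agent options when clusterinfo['has_rgmanager_app_agents'] is True. A minimal, self-contained Python sketch of that flow follows (not part of the commit; FakeRicciClient and the __main__ block are illustrative stand-ins, not real Conga APIs):

	# Illustrative sketch only; mirrors the logic added in this commit.
	from xml.dom.minidom import parseString

	class FakeRicciClient(object):
		# Stand-in for the ricci agent object index_html stores in the session.
		def __init__(self, installed):
			self.installed = installed

		def batch_run(self, batch_str, async=False):
			# A real ricci agent would run the rpm query; here we fake the reply.
			if self.installed:
				return parseString('<r><rpm name="rgmanager-app-agents" version="1.0"/></r>')
			return parseString('<r><rpm name="rgmanager-app-agents"/></r>')

	def has_rgmanager_app_agents(rc):
		# Same test as the patched ricci_bridge.py: the package counts as
		# installed only when the query response carries a version attribute.
		ricci_xml = rc.batch_run('<module name="rpm">...</module>', async=False)
		for tag in ricci_xml.getElementsByTagName('rpm'):
			if tag.getAttribute('name').strip().lower() == 'rgmanager-app-agents':
				return bool(tag.getAttribute('version').strip())
		return False

	if __name__ == '__main__':
		# getClusterInfo() stores the flag in the 'clusterinfo' map; the TAL
		# condition in resource-form-macros checks it before offering the
		# Apache, LVM, MySQL, ... resource agents.
		clumap = {'has_rgmanager_app_agents':
					has_rgmanager_app_agents(FakeRicciClient(installed=True))}
		print clumap['has_rgmanager_app_agents']

	Keeping the ricci handle in the session (the index_html change) is what lets getClusterInfo() reach the agent for this query without an extra lookup per request.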

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.191&r2=1.192
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.30&r2=1.31
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&r1=1.33&r2=1.34
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.241&r2=1.242
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.56&r2=1.57

--- conga/luci/cluster/form-macros	2007/02/16 05:26:18	1.191
+++ conga/luci/cluster/form-macros	2007/02/20 23:06:59	1.192
@@ -4117,6 +4117,8 @@
 	<tal:block metal:use-macro="here/form-macros/macros/service-config-head-macro" />
 
 	<h2>Add a Service</h2>
+	<tal:block tal:define="
+		global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
 	<div id="resskel" class="invisible">
 		<tal:block metal:use-macro="here/resource-form-macros/macros/service-compose-macro" />
@@ -4178,8 +4180,6 @@
 
 	<div class="service_comp_list">
 		<form name="master" method="post">
-		<tal:block
-			tal:define="global clusterinfo python: here.getClusterInfo(modelb, request)" />
 		<input type="button" value="Add a resource to this service"
 			onclick="add_child_resource(this.form);" />
 		<input type="hidden" name="pagetype"
@@ -4395,6 +4395,8 @@
 	<br/>
 
 	<h2>Service Composition</h2>
+	<tal:block tal:define="
+		global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
 	<div id="resskel" class="invisible">
 		<tal:block metal:use-macro="here/resource-form-macros/macros/service-compose-macro" />
@@ -4474,8 +4476,6 @@
 		</form>
 
 		<form name="master" method="post">
-		<tal:block
-			tal:define="global clusterinfo python: here.getClusterInfo(modelb, request)" />
 		<input type="hidden" name="pagetype"
 			tal:attributes="
 				value request/pagetype | request/form/pagetype | nothing" />
--- conga/luci/cluster/index_html	2006/12/21 05:08:48	1.30
+++ conga/luci/cluster/index_html	2007/02/20 23:06:59	1.31
@@ -32,10 +32,6 @@
 			global ri_agent nothing;
 			global busywaiting python:None" />
 
-		<tal:block tal:condition="not: hascluster">
-		    <meta googaa="ooo"/>
-		</tal:block>
-
 		<tal:block tal:condition="hascluster">
 			<tal:block tal:define="
 				global ri_agent python:here.getRicciAgentForCluster(request);
@@ -45,6 +41,10 @@
 				global isBusy python:here.isClusterBusy(request);
 				global firsttime request/busyfirst |nothing" />
 
+			<tal:block tal:condition="ri_agent">
+				<tal:block tal:define="dummy python:request.SESSION.set('ricci', ri_agent)" />
+			</tal:block>
+
 			<tal:block tal:condition="firsttime">
 				<tal:block tal:define="global busywaiting python:True" />
 				<meta http-equiv="refresh"
--- conga/luci/cluster/resource-form-macros	2007/02/16 02:06:08	1.33
+++ conga/luci/cluster/resource-form-macros	2007/02/20 23:06:59	1.34
@@ -120,7 +120,7 @@
 			<option name="SMB" value="SMB">Samba</option>
 
 			<tal:block
-				tal:condition="python: os_version and os_version == 'rhel4'">
+				tal:condition="python:clusterinfo and 'has_rgmanager_app_agents' in clusterinfo and clusterinfo['has_rgmanager_app_agents'] is True">
 				<option name="APACHE" value="APACHE">Apache</option>
 				<option name="LVM" value="LVM">LVM</option>
 				<option name="MYSQL" value="MYSQL">MySQL</option>
@@ -170,7 +170,7 @@
 			<option name="SMB" value="SMB">Samba</option>
 
 			<tal:block
-				tal:condition="python: os_version and os_version == 'rhel4'">
+				tal:condition="python:clusterinfo and 'has_rgmanager_app_agents' in clusterinfo and clusterinfo['has_rgmanager_app_agents'] is True">
 				<option name="APACHE" value="APACHE">Apache</option>
 				<option name="LVM" value="LVM">LVM</option>
 				<option name="MYSQL" value="MYSQL">MySQL</option>
@@ -238,7 +238,10 @@
 	</script>
 
 	<tal:block tal:define="
-		global res python: here.getResourceInfo(modelb, request);" />
+		global res python: here.getResourceInfo(modelb, request)" />
+
+	<tal:block tal:define="
+		global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
 	<h2>Add a Resource</h2>
 
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/16 05:26:18	1.241
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/20 23:07:00	1.242
@@ -3895,7 +3895,18 @@
         luci_log.debug_verbose('GCI0: unable to determine cluster name')
         return {}
 
+  has_app_agents = False
+  try:
+    ricci = req.SESSION.get('ricci')
+    if not ricci:
+      raise Exception, 'blank'
+    has_app_agents = has_rgmanager_app_agents(ricci)
+  except Exception, e:
+    luci_log.debug_verbose('GCI0a: %s' % str(e))
+    has_app_agents = False
+
   clumap = {}
+  clumap['has_rgmanager_app_agents'] = has_app_agents
   if model is None:
     try:
       model = getModelForCluster(self, cluname)
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2007/01/31 19:28:08	1.56
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2007/02/20 23:07:00	1.57
@@ -486,6 +486,43 @@
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
+def has_rgmanager_app_agents(rc):
+	batch_str = '<module name="rpm"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml"><rpm name="rgmanager-app-agents"/></var></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		luci_log.debug_verbose('HRAA0: None returned')
+		return None
+
+	rpm_tags = ricci_xml.getElementsByTagName('rpm')
+	if not rpm_tags or len(rpm_tags) < 1:
+		luci_log.debug_verbose('HRAA: unexpected response %s' \
+			% ricci_xml.toxml())
+		return None
+
+	has_agents = False
+	for i in rpm_tags:
+		try:
+			name = i.getAttribute('name')
+			if not name:
+				raise Exception, 'blank'
+		except Exception, e:
+			luci_log.debug_verbose('HRAA2: %s' % str(e))
+			continue
+
+		if name.strip().lower() != 'rgmanager-app-agents':
+			continue
+
+		try:
+			version = i.getAttribute('version').strip()
+			if version:
+				has_agents = True
+		except Exception, e:
+			luci_log.debug_verbose('HRAA3: %s' % str(e))
+		break
+
+	return has_agents
+
 def nodeIsVirtual(rc):
 	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="virt_guest"/></request></module>'
 




* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2007-07-26  4:16 rmccabe
From: rmccabe @ 2007-07-26  4:16 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-07-26 04:16:46

Modified files:
	luci/cluster   : form-macros index_html 
	luci/homebase  : validate_cluster_add.js 

Log message:
	Fix for 249091

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.25&r2=1.90.2.26
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20.2.12&r2=1.20.2.13
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.4&r2=1.4.2.5

--- conga/luci/cluster/form-macros	2007/07/12 04:41:39	1.90.2.25
+++ conga/luci/cluster/form-macros	2007/07/26 04:16:46	1.90.2.26
@@ -270,7 +270,7 @@
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
 					<input type="checkbox" name="enable_storage"
 						tal:attributes="
-							checked add_cluster/shared_storage | nothing" />
+							checked add_cluster/shared_storage |string:checked" />
 					Enable Shared Storage Support
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
@@ -515,6 +515,7 @@
 			tal:attributes="value cur_sysnum" />
 
 		<div class="hbSubmit" id="hbSubmit">
+			<input type="hidden" name="cluster_create" value="1" />
 			<input type="button" name="Submit" value="Submit"
 				onClick="validate_cluster_create(this.form)" />
 		</div>
@@ -2567,6 +2568,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_apc" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2610,6 +2613,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_egenera" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2646,6 +2651,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_wti" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2682,6 +2689,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_brocade" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2718,6 +2727,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_vixel" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2754,6 +2765,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_sanbox2" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2790,6 +2803,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_mcdata" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2826,6 +2841,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_gnbd" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2862,6 +2879,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_bladecenter" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2898,6 +2917,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_bullpap" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2921,6 +2942,8 @@
 			</tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_scsi" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -2957,6 +2980,8 @@
 			</td></tr>
 		</table>
 
+		<input type="hidden" name="option" tal:condition="exists:cur_instance"
+			tal:attributes="value cur_instance/option |nothing" />
 		<input type="hidden" name="fence_type" value="fence_xvm" />
 		<input type="hidden" name="fence_instance" value="1" />
 		<input tal:condition="exists: cur_instance"
@@ -3116,7 +3141,7 @@
 					<option tal:attributes="value nodeinfo/delete_url"
 						tal:condition="python: not 'ricci_error' in nodeinfo">
 						Delete this node</option>
-					<option tal:attributes="value nodeinfo/force_delete_url"
+					<option tal:attributes="value nodeinfo/force_delete_url | nothing"
 						tal:condition="python: 'ricci_error' in nodeinfo">
 						Force the deletion of this node</option>
 				</select>
--- conga/luci/cluster/index_html	2007/07/12 04:41:39	1.20.2.12
+++ conga/luci/cluster/index_html	2007/07/26 04:16:46	1.20.2.13
@@ -44,7 +44,7 @@
 			<tal:block tal:condition="firsttime">
 				<tal:block tal:define="global busywaiting python:True" />
 				<meta http-equiv="refresh"
-					tal:attributes="content isBusy/refreshurl | python:'3%surl=/luci/cluster' % chr(0x3b)" />
+					tal:attributes="content isBusy/refreshurl | python:'5%surl=/luci/cluster' % chr(0x3b)" />
 			</tal:block>
 
 			<tal:block tal:define="global busy isBusy/busy | nothing" />
@@ -52,7 +52,7 @@
 			<tal:block tal:condition="busy">
 				<tal:block tal:define="global busywaiting python:True" />
 				<meta http-equiv="refresh"
-					tal:attributes="content isBusy/refreshurl | python:'3%surl=/luci/cluster' % chr(0x3b)" />
+					tal:attributes="content isBusy/refreshurl | python:'5%surl=/luci/cluster' % chr(0x3b)" />
 			</tal:block>
 		</tal:block>
     </metal:headslot>
--- conga/luci/homebase/validate_cluster_add.js	2007/07/12 04:41:39	1.4.2.4
+++ conga/luci/homebase/validate_cluster_add.js	2007/07/26 04:16:46	1.4.2.5
@@ -43,9 +43,13 @@
 	if (!view_certs || !view_certs.checked) {
 		var confirm_str = '';
 		if (form.addnode) {
-			confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?';
+			confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?\nEach node added will be rebooted during this process.';
 		} else {
-			confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
+			if (form.cluster_create) {
+				confirm_str = 'All nodes added to this cluster will be rebooted as part of this process.\n\nCreate cluster \"' + clustername + '\"?';
+			} else {
+				confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
+			}
 		}
 
 		if (confirm(confirm_str)) {




* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2007-02-20 23:09 rmccabe
From: rmccabe @ 2007-02-20 23:09 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-02-20 23:09:37

Modified files:
	luci/cluster   : form-macros index_html resource-form-macros 
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	- Only allow resources using the new application resource agents to be added and configured if the 'rgmanager-app-agents' package is installed.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.176.2.12&r2=1.176.2.13
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.30&r2=1.30.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.31.2.2&r2=1.31.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.9&r2=1.227.2.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.56&r2=1.56.2.1

--- conga/luci/cluster/form-macros	2007/02/16 05:29:38	1.176.2.12
+++ conga/luci/cluster/form-macros	2007/02/20 23:09:37	1.176.2.13
@@ -4117,6 +4117,8 @@
 	<tal:block metal:use-macro="here/form-macros/macros/service-config-head-macro" />
 
 	<h2>Add a Service</h2>
+	<tal:block tal:define="
+		global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
 	<div id="resskel" class="invisible">
 		<tal:block metal:use-macro="here/resource-form-macros/macros/service-compose-macro" />
@@ -4178,8 +4180,6 @@
 
 	<div class="service_comp_list">
 		<form name="master" method="post">
-		<tal:block
-			tal:define="global clusterinfo python: here.getClusterInfo(modelb, request)" />
 		<input type="button" value="Add a resource to this service"
 			onclick="add_child_resource(this.form);" />
 		<input type="hidden" name="pagetype"
@@ -4395,6 +4395,8 @@
 	<br/>
 
 	<h2>Service Composition</h2>
+	<tal:block tal:define="
+		global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
 	<div id="resskel" class="invisible">
 		<tal:block metal:use-macro="here/resource-form-macros/macros/service-compose-macro" />
@@ -4474,8 +4476,6 @@
 		</form>
 
 		<form name="master" method="post">
-		<tal:block
-			tal:define="global clusterinfo python: here.getClusterInfo(modelb, request)" />
 		<input type="hidden" name="pagetype"
 			tal:attributes="
 				value request/pagetype | request/form/pagetype | nothing" />
--- conga/luci/cluster/index_html	2006/12/21 05:08:48	1.30
+++ conga/luci/cluster/index_html	2007/02/20 23:09:37	1.30.2.1
@@ -32,10 +32,6 @@
 			global ri_agent nothing;
 			global busywaiting python:None" />
 
-		<tal:block tal:condition="not: hascluster">
-		    <meta googaa="ooo"/>
-		</tal:block>
-
 		<tal:block tal:condition="hascluster">
 			<tal:block tal:define="
 				global ri_agent python:here.getRicciAgentForCluster(request);
@@ -45,6 +41,10 @@
 				global isBusy python:here.isClusterBusy(request);
 				global firsttime request/busyfirst |nothing" />
 
+			<tal:block tal:condition="ri_agent">
+				<tal:block tal:define="dummy python:request.SESSION.set('ricci', ri_agent)" />
+			</tal:block>
+
 			<tal:block tal:condition="firsttime">
 				<tal:block tal:define="global busywaiting python:True" />
 				<meta http-equiv="refresh"
--- conga/luci/cluster/resource-form-macros	2007/02/16 02:12:46	1.31.2.2
+++ conga/luci/cluster/resource-form-macros	2007/02/20 23:09:37	1.31.2.3
@@ -120,7 +120,7 @@
 			<option name="SMB" value="SMB">Samba</option>
 
 			<tal:block
-				tal:condition="python: os_version and os_version == 'rhel4'">
+				tal:condition="python:clusterinfo and 'has_rgmanager_app_agents' in clusterinfo and clusterinfo['has_rgmanager_app_agents'] is True">
 				<option name="APACHE" value="APACHE">Apache</option>
 				<option name="LVM" value="LVM">LVM</option>
 				<option name="MYSQL" value="MYSQL">MySQL</option>
@@ -170,7 +170,7 @@
 			<option name="SMB" value="SMB">Samba</option>
 
 			<tal:block
-				tal:condition="python: os_version and os_version == 'rhel4'">
+				tal:condition="python:clusterinfo and 'has_rgmanager_app_agents' in clusterinfo and clusterinfo['has_rgmanager_app_agents'] is True">
 				<option name="APACHE" value="APACHE">Apache</option>
 				<option name="LVM" value="LVM">LVM</option>
 				<option name="MYSQL" value="MYSQL">MySQL</option>
@@ -238,7 +238,10 @@
 	</script>
 
 	<tal:block tal:define="
-		global res python: here.getResourceInfo(modelb, request);" />
+		global res python: here.getResourceInfo(modelb, request)" />
+
+	<tal:block tal:define="
+		global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
 	<h2>Add a Resource</h2>
 
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/16 05:29:38	1.227.2.9
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/20 23:09:37	1.227.2.10
@@ -3895,7 +3895,18 @@
         luci_log.debug_verbose('GCI0: unable to determine cluster name')
         return {}
 
+  has_app_agents = False
+  try:
+    ricci = req.SESSION.get('ricci')
+    if not ricci:
+      raise Exception, 'blank'
+    has_app_agents = has_rgmanager_app_agents(ricci)
+  except Exception, e:
+    luci_log.debug_verbose('GCI0a: %s' % str(e))
+    has_app_agents = False
+
   clumap = {}
+  clumap['has_rgmanager_app_agents'] = has_app_agents
   if model is None:
     try:
       model = getModelForCluster(self, cluname)
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2007/01/31 19:28:08	1.56
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2007/02/20 23:09:37	1.56.2.1
@@ -486,6 +486,43 @@
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
+def has_rgmanager_app_agents(rc):
+	batch_str = '<module name="rpm"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml"><rpm name="rgmanager-app-agents"/></var></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		luci_log.debug_verbose('HRAA0: None returned')
+		return None
+
+	rpm_tags = ricci_xml.getElementsByTagName('rpm')
+	if not rpm_tags or len(rpm_tags) < 1:
+		luci_log.debug_verbose('HRAA: unexpected response %s' \
+			% ricci_xml.toxml())
+		return None
+
+	has_agents = False
+	for i in rpm_tags:
+		try:
+			name = i.getAttribute('name')
+			if not name:
+				raise Exception, 'blank'
+		except Exception, e:
+			luci_log.debug_verbose('HRAA2: %s' % str(e))
+			continue
+
+		if name.strip().lower() != 'rgmanager-app-agents':
+			continue
+
+		try:
+			version = i.getAttribute('version').strip()
+			if version:
+				has_agents = True
+		except Exception, e:
+			luci_log.debug_verbose('HRAA3: %s' % str(e))
+		break
+
+	return has_agents
+
 def nodeIsVirtual(rc):
 	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="virt_guest"/></request></module>'
 




* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-12-21  5:08 rmccabe
From: rmccabe @ 2006-12-21  5:08 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-12-21 05:08:49

Modified files:
	luci/cluster   : form-macros index_html validate_config_qdisk.js 
	luci/homebase  : form-macros homebase_common.js index_html 
	                 luci_homebase.css validate_sys_remove.js 
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py ricci_bridge.py 
	                           ricci_communicator.py 

Log message:
	Most of the remaining fixes for bz201394; minor cleanup/polish still forthcoming.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.135&r2=1.136
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.29&r2=1.30
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_config_qdisk.js.diff?cvsroot=cluster&r1=1.4&r2=1.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/form-macros.diff?cvsroot=cluster&r1=1.49&r2=1.50
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/homebase_common.js.diff?cvsroot=cluster&r1=1.14&r2=1.15
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/index_html.diff?cvsroot=cluster&r1=1.20&r2=1.21
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/luci_homebase.css.diff?cvsroot=cluster&r1=1.30&r2=1.31
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_sys_remove.js.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.190&r2=1.191
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.41&r2=1.42
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.51&r2=1.52
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.22&r2=1.23

--- conga/luci/cluster/form-macros	2006/12/20 22:07:16	1.135
+++ conga/luci/cluster/form-macros	2006/12/21 05:08:48	1.136
@@ -208,173 +208,226 @@
 	<tal:block tal:omit-tag=""
 		tal:define="global sessionObj python: request.SESSION.get('checkRet')" />
 
-	<form name="adminform" action="" method="post">
-		<input name="pagetype" id="pagetype" type="hidden" value="6" />
+	<h1>Add a cluster</h1>
 
-		<h1>Add a cluster</h1>
+	<form name="create_cluster" action="" method="post"
+		tal:define="
+			global add_cluster request/SESSION/create_cluster | nothing">
+
+		<input name="pagetype" type="hidden"
+			tal:attributes="value request/form/pagetype | request/pagetype |string:6" />
+
+		<input name="cluster_os" type="hidden"
+			tal:attributes="value add_cluster/cluster_os | nothing" />
 
-		<tal:block tal:condition="python: not sessionObj or not 'requestResults' in sessionObj or not 'nodeList' in sessionObj['requestResults']">
-		<input name="numStorage" type="hidden" value="3" />
 		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
 			<thead class="systemsTable">
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<div class="systemsTableTop">
-						<strong>Cluster Name</strong>
-						<input class="hbInputSys" type="text" id="clusterName" name="clusterName" />
-					</div>
-				</td></tr>
+                <tr class="systemsTable"><td class="systemsTable" colspan="2">
+                    <div class="systemsTableTop">
+                        <strong>Cluster Name</strong>
+                        <input class="hbInputSys" type="text"
+							id="clusterName" name="clusterName"
+							tal:attributes="value add_cluster/name | nothing" />
+                    </div>
+                </td></tr>
 				<tr class="systemsTable">
-					<th class="systemsTable">System Hostname</th>
-					<th class="systemsTable">Password</th>
+					<th class="systemsTable">Node Hostname</th>
+					<th class="systemsTable">Root Password</th>
+					<tal:block tal:condition="add_cluster">
+						<th class="systemsTable">Key ID</th>
+						<th class="systemsTable">Trust</th>
+					</tal:block>
+					<th></th>
 				</tr>
 			</thead>
 
 			<tfoot class="systemsTable">
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
 					<ul class="vanilla deploy">
-						<li class="vanilla"><input type="radio" name="rhn_dl" value="1" checked="checked" />Download packages</li>
-						<li class="vanilla"><input type="radio" name="rhn_dl" value="0" />Use locally installed packages.</li>
+						<li class="vanilla">
+							<input type="radio" name="download_pkgs"
+								value="1" checked="checked" />
+							Download packages
+						</li>
+						<li class="vanilla">
+							<input type="radio" name="download_pkgs"
+								value="0" />
+							Use locally installed packages.
+						</li>
 					</ul>
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<input type="checkbox" value="1" name="enable_storage" />Enable Shared Storage Support
+					<input type="checkbox" name="enable_storage" />
+					Enable Shared Storage Support
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<div>
-						<input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if cluster node passwords are identical.
-					</div>
+					<ul class="vanilla">
+						<li class="vanilla">
+							<input name="check_certs" type="checkbox"
+								tal:attributes="checked python: (add_cluster and add_cluster['check_certs']) and 'checked'" />
+							View system certificates before sending any passwords.
+						</li>
+						<li class="vanilla">
+							<input type="checkbox"
+								name="allSameCheckBox" id="allSameCheckBox"
+								onClick="allPasswdsSame(this.form)"
+								tal:attributes="checked python: (add_cluster and add_cluster['identical_passwds']) and 'checked'"
+							/>
+							
+							Check if node passwords are identical.
+						</li>
+					</ul>
 				</td></tr>
-
 				<tr class="systemsTable"><td class="systemsTable" colspan="2">
 					<div class="systemsTableEnd">
-						<input type="button" value="Add Another Row" onClick="addSystem(adminform);" />
+						<input type="button" value="Add another entry"
+							onClick="addSystem(this.form)" />
 					</div>
 				</td></tr>
 			</tfoot>
 
+			<tal:block tal:define="global cur_sysnum python:0" />
+
 			<tbody class="systemsTable">
-				<tr class="systemsTable">
+			 <tal:block
+				tal:condition="exists: add_cluster/nodes"
+				tal:repeat="cur_sys add_cluster/nodes">
+				<tr class="systemsTable"
+					tal:attributes="id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+					tal:define="sys python: add_cluster['nodes'][cur_sys]">
+					<td class="systemsTable">
+						<input type="text"
+							tal:attributes="
+								value sys/host | nothing;
+								id python: '__SYSTEM%d:Addr' % cur_sysnum;
+								name python: '__SYSTEM%d:Addr' % cur_sysnum;
+								class python: 'hbInputSys' + ('errors' in sys and ' error' or '');
+								disabled python: ('auth' in sys and sys['host'].count('.') > 0) and 1 or 0"
+						 />
+					</td>
+					<td class="systemsTable">
+						<tal:block tal:condition="not: exists: sys/auth">
+							<input type="password"
+								onChange="pwd0Change(this.form)"
+								autocomplete="off"
+								tal:attributes="
+									value sys/passwd | nothing;
+									class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+						</tal:block>
+
+						<tal:block tal:condition="exists: sys/auth">
+							<input type="text" onChange="pwd0Change(this.form)"
+								disabled="disabled" value="[authenticated]"
+								tal:attributes="
+									class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+						</tal:block>
+					</td>
+					<td tal:condition="add_cluster" class="systemsTable">
+						<img 
+							tal:attributes="
+								src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+								title sys/fp | string:no key fingerprint available" />
+						<input type="hidden"
+							tal:attributes="
+								id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+								name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+								value sys/fp | nothing" />
+					</td>
+					<td tal:condition="add_cluster" class="systemsTable">
+						<input type="checkbox" tal:attributes="
+							checked exists: sys/fp;
+							id python: '__SYSTEM%dTrusted' % cur_sysnum;
+							name python: '__SYSTEM%dTrusted' % cur_sysnum;
+							disabled python: 'trusted' in sys"
+						/>
+					</td>
+					<td class="systemsTable">
+						<img src="/luci/delete-row.png" class="deleteRow"
+							title="delete this row"
+							tal:attributes="
+								onclick python: 'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
+					</td>
+				</tr>
+				<tal:block
+					tal:define="global cur_sysnum python: cur_sysnum + 1" />
+			 </tal:block>
+
+				<tr class="systemsTable" id="__SYSTEM_ROW_0"
+					tal:condition="not: add_cluster">
 					<td class="systemsTable">
 						<input class="hbInputSys" type="text"
 							id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
 					</td>
 					<td class="systemsTable">
 						<input type="password"
-							id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
+							onChange="pwd0Change(this.form)"
 							class="hbInputPass" autocomplete="off"
-							onChange="pwd0Change(adminform);" />
+							onChange="pwd0Change(this.form)"
+							id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd" />
+					</td>
+					<td class="systemsTable">
+						<img src="/luci/delete-row.png" class="deleteRow"
+							title="delete this row"
+							onclick="delete_element_id('__SYSTEM_ROW_0')" />
 					</td>
 				</tr>
-
-				<tr class="systemsTable">
+				<tr class="systemsTable" id="__SYSTEM_ROW_1"
+					tal:condition="not: add_cluster">
 					<td class="systemsTable">
 						<input class="hbInputSys" type="text"
 							id="__SYSTEM1:Addr" name="__SYSTEM1:Addr" />
 					</td>
 					<td class="systemsTable">
 						<input type="password"
-							id="__SYSTEM1:Passwd" name="__SYSTEM1:Passwd"
+							onChange="pwd0Change(this.form)"
 							class="hbInputPass" autocomplete="off"
-							onChange="pwd0Change(adminform);" />
+							id="__SYSTEM1:Passwd" name="__SYSTEM1:Passwd" />
+					</td>
+					<td class="systemsTable">
+						<img src="/luci/delete-row.png" class="deleteRow"
+							title="delete this row"
+							onclick="delete_element_id('__SYSTEM_ROW_1')" />
 					</td>
 				</tr>
-
-				<tr class="systemsTable">
+				<tr class="systemsTable" id="__SYSTEM_ROW_2"
+					tal:condition="not: add_cluster">
 					<td class="systemsTable">
 						<input class="hbInputSys" type="text"
 							id="__SYSTEM2:Addr" name="__SYSTEM2:Addr" />
 					</td>
 					<td class="systemsTable">
 						<input type="password"
-							id="__SYSTEM2:Passwd" name="__SYSTEM2:Passwd"
+							onChange="pwd0Change(this.form)"
 							class="hbInputPass" autocomplete="off"
-							onChange="pwd0Change(adminform);" />
+							id="__SYSTEM2:Passwd" name="__SYSTEM2:Passwd" />
 					</td>
-				</tr>
-			</tbody>
-		</table>
-		</tal:block>
-
-		<tal:block tal:condition="python: sessionObj and 'requestResults' in sessionObj and 'nodeList' in sessionObj['requestResults']">
-
-		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
-			<thead class="systemsTable">
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<div class="systemsTableTop">
-						<strong>Cluster Name:</strong>
-						<input type="text" id="clusterName" name="clusterName"
-							tal:attributes="value python: sessionObj['requestResults']['clusterName']" />
-					</div>
-				</td></tr>
-				<tr class="systemsTable">
-					<th class="systemsTable">Node Hostname</th>
-					<th class="systemsTable">Root Password</th>
-				</tr>
-			</thead>
-
-			<tfoot class="systemsTable">
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<ul class="vanilla deploy">
-						<li class="vanilla"><input type="radio" name="rhn_dl" value="1" checked="checked" />Download packages</li>
-						<li class="vanilla"><input type="radio" name="rhn_dl" value="0" />Use locally installed packages.</li>
-					</ul>
-				</td></tr>
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<input type="checkbox" value="1" name="enable_storage" />Enable Shared Storage Support
-				</td></tr>
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<div>
-						<input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if cluster node passwords are identical.
-					</div>
-				</td></tr>
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<div class="systemsTableEnd">
-						<input type="button" value="Add Another Row" onClick="addSystem(adminform);" />
-					</div>
-				</td></tr>
-			</tfoot>
-
-			<span tal:omit-tag="" tal:define="global sysNum python: 0" />
-
-			<tbody class="systemsTable">
-			<tal:block tal:repeat="node python: sessionObj['requestResults']['nodeList']">
-				<span tal:omit-tag=""
-					tal:define="global nodeAuth python: node['cur_auth']" />
-
-				<tr class="systemsTable">
 					<td class="systemsTable">
-						<input type="text"
-							tal:attributes="
-								id python: '__SYSTEM' + str(sysNum) + ':Addr';
-								name python: '__SYSTEM' + str(sysNum) + ':Addr';
-								value python: node['ricci_host'];
-								class python: 'hbInputSys' + ('errors' in node and ' error' or '')"
-						 />
-					</td>
-					<td class="systemsTable">
-						<input
-							onChange="pwd0Change(adminform);"
-							tal:attributes="
-								type python: nodeAuth and 'text' or 'password';
-								value python: nodeAuth and '[authenticated]' or '';
-								class python: 'hbInputPass' + ('errors' in node and ' error' or '');
-								id python: '__SYSTEM' + str(sysNum) + ':Passwd';
-								name python: '__SYSTEM' + str(sysNum) + ':Passwd'"
-						/>
+						<img src="/luci/delete-row.png" class="deleteRow"
+							title="delete this row"
+							onclick="delete_element_id('__SYSTEM_ROW_2')" />
 					</td>
+					<tal:block tal:define="global cur_sysnum python:3" />
 				</tr>
-				<span tal:omit-tag="" tal:define="global sysNum python: sysNum + 1" />
-			</tal:block>
 			</tbody>
 		</table>
-		<input type="hidden" name="numStorage" tal:attributes="value python: sysNum" />
 
-		</tal:block>
+		<input name="numStorage" id="numStorage" type="hidden"
+			tal:attributes="value cur_sysnum" />
 
 		<div class="hbSubmit" id="hbSubmit">
-			<input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+			<input type="button" name="Submit" value="Submit"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
+
+	<div tal:condition="add_cluster">
+		<tal:block
+			tal:define="x python: request.SESSION.delete('create_cluster')" />
+	</div>
 </div>
 
 <div metal:define-macro="clusterconfig-form">
@@ -1018,9 +1071,10 @@
 						<input class="qdscore qdisk" type="text" name="heuristic0:hscore" id="heuristic0:hscore" value="">
 					</td>
 					<td class="systemsTable">
-						<img class="qdscore qdisk qdel_img"
+						<img class="qdisk deleteRow"
 							id="heuristic0:hdel" name="heuristic0:hdel"
-							src="/luci/homebase/x.png"
+							src="/luci/delete-row.png"
+							title="delete this heuristic"
 							onClick="delete_qdisk_heur(this, document.quorum_partition);">
 					</td>
 				</tr>
@@ -1067,8 +1121,9 @@
 								name python: 'heuristic' + str(curHeur) + ':hscore';"/>
 					</td>
 					<td class="systemsTable">
-						<img class="qdscore qdisk qdel_img"
-							src="/luci/homebase/x.png"
+						<img class="qdisk deleteRow"
+							src="/luci/homebase/delete-row.png"
+							title="delete this heuristic"
 							onClick="delete_qdisk_heur(this, document.quorum_partition);"
 							tal:attributes="
 								id python: 'heuristic' + str(curHeur) + ':hdel';
@@ -2804,6 +2859,7 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? Add a new cluster node');
 	</script>
+
 	<script type="text/javascript"
 		src="/luci/homebase/homebase_common.js">
 	</script>
@@ -2812,79 +2868,191 @@
 		src="/luci/homebase/validate_cluster_add.js">
 	</script>
 
-	<input type="hidden" name="clusterName"
-		tal:attributes="value request/form/clusterName | request/clustername | none"
-	/>
 
-	<form name="adminform" action="" method="post">
-		<input name="numStorage" type="hidden" value="1" />
-		<input name="pagetype" type="hidden" value="15" />
-		<input name="addnode" type="hidden" value="1" />
+	<form name="add_node" action="" method="post"
+		tal:define="
+			global add_cluster request/SESSION/add_node | nothing;
+			global cur_cluster_name add_cluster/name | request/clustername | request/form/clusterName | nothing">
+
+		<h2>Add a node to <span tal:replace="cur_cluster_name | string:this cluster" /></h2>
 		<input type="hidden" name="clusterName"
-			tal:attributes="
-				value request/form/clusterName | request/clustername | nothing"
-		/>
+            tal:attributes="value cur_cluster_name | string:[unknown]" />
 
-		<h2>Add a node to <span tal:replace="request/form/clusterName | request/clustername | string:the cluster" /></h2>
+		<input name="pagetype" type="hidden"
+			tal:attributes="value request/form/pagetype | request/pagetype | string:15" />
 
-		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
+		<input name="cluster_os" type="hidden"
+			tal:attributes="value add_cluster/cluster_os | nothing" />
+
+		<table id="systemsTable" class="systemsTable" cellspacing="0">
 			<thead class="systemsTable">
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<div class="systemsTableTop">
-						<strong>Cluster Name</strong> <span tal:content="request/form/clusterName | request/clustername | none" />
-					</div>
-				</td></tr>
 				<tr class="systemsTable">
-					<th class="systemsTable">System Hostname</th>
-					<th class="systemsTable">Password</th>
+					<th class="systemsTable">Node Hostname</th>
+					<th class="systemsTable">Root Password</th>
+					<tal:block tal:condition="add_cluster">
+						<th class="systemsTable">Key ID</th>
+						<th class="systemsTable">Trust</th>
+					</tal:block>
+					<th></th>
 				</tr>
 			</thead>
 
 			<tfoot class="systemsTable">
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
 					<ul class="vanilla deploy">
-						<li class="vanilla"><input type="radio" name="rhn_dl" value="1" checked="checked" />Download packages</li>
-						<li class="vanilla"><input type="radio" name="rhn_dl" value="0" />Use locally installed packages.</li>
+						<li class="vanilla">
+							<input type="radio" name="download_pkgs" value="1"
+								tal:attributes="
+									checked add_system/download_pkgs | string:checked" />
+							Download packages
+						</li>
+						<li class="vanilla">
+							<input type="radio" name="download_pkgs" value="0"
+								tal:attributes="
+									checked not: add_system/download_pkgs | nothing" />
+							
+							Use locally installed packages.
+						</li>
 					</ul>
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<input type="checkbox" value="1" name="enable_storage" />Enable Shared Storage Support
+					<input type="checkbox" name="enable_storage"
+						tal:attributes="
+							checked add_system/shared_storage | nothing" />
+					Enable Shared Storage Support
 				</td></tr>
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<div id="allSameDiv">
-						<input type="checkbox" class="allSameCheckBox"
-							name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/>
-						Check if cluster node passwords are identical.
-					</div>
+					<ul class="vanilla">
+						<li class="vanilla">
+							<input name="check_certs" type="checkbox"
+								tal:attributes="checked python: (add_cluster and add_cluster['check_certs']) and 'checked'" />
+							View system certificates before sending any passwords.
+						</li>
+						<li class="vanilla"
+							tal:attributes="id python: (not add_cluster or ('nodes' in add_cluster and len(add_cluster['nodes']) < 2)) and 'allSameDiv'">
+							<input type="checkbox"
+								name="allSameCheckBox" id="allSameCheckBox"
+								onClick="allPasswdsSame(this.form)"
+								tal:attributes="checked python: (add_cluster and add_cluster['identical_passwds']) and 'checked'"
+							/>
+							Check if node passwords are identical.
+						</li>
+					</ul>
 				</td></tr>
-
 				<tr class="systemsTable"><td class="systemsTable" colspan="2">
 					<div class="systemsTableEnd">
-						<input type="button" value="Add Another Row" onClick="addSystem(adminform);" />
+						<input type="button" value="Add another entry"
+							onClick="addSystem(this.form)" />
 					</div>
 				</td></tr>
 			</tfoot>
 
+			<tal:block tal:define="global cur_sysnum python:0" />
+
 			<tbody class="systemsTable">
-				<tr class="systemsTable">
+			 <tal:block
+				tal:condition="exists: add_cluster/nodes"
+				tal:repeat="cur_sys add_cluster/nodes">
+				<tr class="systemsTable"
+					tal:attributes="id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+					tal:define="sys python: add_cluster['nodes'][cur_sys]">
+					<td class="systemsTable">
+						<input type="text"
+							tal:attributes="
+								value sys/host | nothing;
+								id python: '__SYSTEM%d:Addr' % cur_sysnum;
+								name python: '__SYSTEM%d:Addr' % cur_sysnum;
+								class python: 'hbInputSys' + ('errors' in sys and ' error' or '');
+								disabled python: ('auth' in sys and sys['host'].count('.') > 0) and 1 or 0"
+						 />
+					</td>
+					<td class="systemsTable">
+						<tal:block tal:condition="not: exists: sys/auth">
+							<input type="password"
+								autocomplete="off"
+								onChange="pwd0Change(this.form)"
+								tal:attributes="
+									value sys/passwd | nothing;
+									class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+						</tal:block>
+
+						<tal:block tal:condition="exists: sys/auth">
+							<input type="text" onChange="pwd0Change(this.form)"
+								disabled="disabled" value="[authenticated]"
+								tal:attributes="
+									class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+						</tal:block>
+					</td>
+					<td tal:condition="add_cluster" class="systemsTable">
+						<img 
+							tal:attributes="
+								src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+								title sys/fp | string:no key fingerprint available" />
+						<input type="hidden"
+							tal:attributes="
+								id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+								name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+								value sys/fp | nothing" />
+					</td>
+					<td tal:condition="add_cluster" class="systemsTable">
+						<input type="checkbox" tal:attributes="
+							checked exists: sys/fp;
+							id python: '__SYSTEM%dTrusted' % cur_sysnum;
+							name python: '__SYSTEM%dTrusted' % cur_sysnum;
+							disabled python: 'trusted' in sys"
+						/>
+					</td>
+					<td class="systemsTable">
+						<img src="/luci/delete-row.png" class="deleteRow"
+							title="delete this row"
+							tal:attributes="
+								onclick python: 'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
+					</td>
+				</tr>
+				<tal:block
+					tal:define="global cur_sysnum python: cur_sysnum + 1" />
+			 </tal:block>
+
+				<tr class="systemsTable" id="__SYSTEM_ROW_0"
+					tal:condition="not: add_cluster">
 					<td class="systemsTable">
 						<input class="hbInputSys" type="text"
 							id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
 					</td>
 					<td class="systemsTable">
 						<input type="password"
-							id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
+							onChange="pwd0Change(this.form)"
 							class="hbInputPass" autocomplete="off"
-							onChange="pwd0Change(adminform);" />
+							onChange="pwd0Change(this.form)"
+							id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd" />
+					</td>
+					<td class="systemsTable">
+						<img src="/luci/delete-row.png" class="deleteRow"
+							title="delete this row"
+							onclick="delete_element_id('__SYSTEM_ROW_0')" />
 					</td>
+					<tal:block tal:define="global cur_sysnum python:1" />
 				</tr>
 			</tbody>
 		</table>
 
+		<input name="numStorage" id="numStorage" type="hidden"
+			tal:attributes="value cur_sysnum" />
+
 		<div class="hbSubmit" id="hbSubmit">
-			<input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+			<input type="button" name="Submit" value="Submit"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
+
+	<div tal:condition="add_cluster">
+		<tal:block
+			tal:define="x python: request.SESSION.delete('add_node')" />
+	</div>
 </div>
 
 <div metal:define-macro="nodeprocess-form">
@@ -2893,14 +3061,14 @@
 
 		<div id="errmsgsdiv" class="errmsgs"
 			tal:condition="python: result and len(result) > 1 and 'errors' in result[1]">
-            <p class="errmsgs">The following errors occurred:</p>
+			<p class="errmsgs">The following errors occurred:</p>
 
-            <ul class="errmsgs">
-                <tal:block tal:repeat="e python: result[1]['errors']">
-                    <li class="errmsgs" tal:content="python:e" />
-                </tal:block>
-            </ul>
-        </div>
+			<ul class="statusmsg">
+				<tal:block tal:repeat="e python: result[1]['errors']">
+					<li class="statusmsg" tal:content="python:e" />
+				</tal:block>
+			</ul>
+		</div>
 	</tal:block>
 </div>
 
--- conga/luci/cluster/index_html	2006/11/29 18:39:50	1.29
+++ conga/luci/cluster/index_html	2006/12/21 05:08:48	1.30
@@ -207,23 +207,24 @@
 		<tal:block tal:define="ret python: request.SESSION.get('checkRet')">
 		<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
 			<div class="hbclosebox">
-				<a href="javascript:hide_element('retmsgsdiv');"><img src="../homebase/x.png"></a>
+				<a href="javascript:hide_element('retmsgsdiv')"><img src="/luci/homebase/x.png" class="closeBox" title="dismiss"></a>
 			</div>
-			<ul class="retmsgs">
+			<p class="retmsgs">Status messages:</p>
+			<ul class="statusmsg">
 				<tal:block tal:repeat="e python:ret['messages']">
-					<li class="retmsgs" tal:content="python:e" />
+					<li class="statusmsg" tal:content="python:e" />
 				</tal:block>
 			</ul>
 		</div>
 
 		<div id="errmsgsdiv" class="errmsgs" tal:condition="python:(ret and 'errors' in ret and len(ret['errors']))">
 			<div class="hbclosebox">
-				<a class="hbclosebox" href="javascript:hide_element('errmsgsdiv');"><img src="../homebase/x.png"></a>
+				<a class="hbclosebox" href="javascript:hide_element('errmsgsdiv')"><img src="/luci/homebase/x.png" class="closeBox" title="dismiss"></a>
 			</div>
 			<p class="errmsgs">The following errors occurred:</p>
-			<ul class="errmsgs">
+			<ul class="statusmsg">
 				<tal:block tal:repeat="e python:ret['errors']">
-					<li class="errmsgs" tal:content="python:e" />
+					<li class="statusmsg" tal:content="python:e" />
 				</tal:block>
 			</ul>
 		</div>
--- conga/luci/cluster/validate_config_qdisk.js	2006/10/04 17:24:58	1.4
+++ conga/luci/cluster/validate_config_qdisk.js	2006/12/21 05:08:48	1.5
@@ -314,10 +314,11 @@
 	var del_td = document.createElement('td');
 	del_td.className = 'systemsTable';
 	var del_img = document.createElement('img');
-	del_img.className = 'qdscore qdisk qdel_img';
+	del_img.className = 'qdisk deleteRow';
 	del_img.setAttribute('name', hstr + ':hdel');
 	del_img.setAttribute('id', hstr + ':hdel');
-	del_img.setAttribute('src', '/luci/homebase/x.png');
+	del_img.setAttribute('src', '/luci/delete-row.png');
+	del_img.setAttribute('title', 'delete this row');
 	del_img.setAttribute('onClick', 'delete_qdisk_heur(this, document.quorum_partition)');
 	del_td.appendChild(del_img);
 
--- conga/luci/homebase/form-macros	2006/11/01 23:04:17	1.49
+++ conga/luci/homebase/form-macros	2006/12/21 05:08:48	1.50
@@ -71,7 +71,8 @@
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
 
 		<div class="hbSubmit" tal:condition="python:userList" id="hbSubmit">
-			<input name="Submit" type="button" value="Delete This User" onClick="validateForm(document.adminform);" />
+			<input name="Submit" type="button" value="Delete This User"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
 
@@ -140,7 +141,8 @@
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
 
 		<div class="hbSubmit" id="hbSubmit">
-			<input name="Submit" type="button" value="Submit" onClick="validateForm(document.adminform);" />
+			<input name="Submit" type="button" value="Submit"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
 </div>
@@ -196,7 +198,7 @@
 
 		<span tal:condition="python:perms" tal:content="string:Select a User" /><br/>
 
-		<select tal:omit-tag="python: not perms" class="homebase" name="userList" onChange="document.location = adminform.baseURL.value + '&user=' + adminform.userList.options[adminform.userList.selectedIndex].text">
+		<select tal:omit-tag="python: not perms" class="homebase" name="userList" onChange="document.location = this.form.baseURL.value + '&user=' + this.form.userList.options[this.form.userList.selectedIndex].text">
 			<tal:block tal:repeat="user python:perms">
 				<option class="homebase"
 					tal:content="python:user"
@@ -250,12 +252,14 @@
 			tal:attributes="value python: num_clusters + 1" />
 
 		<div class="hbSubmit" id="hbSubmit">
-			<input type="button" name="Update Permissions" value="Update Permissions" onClick="validateForm(document.adminform);" />
+			<input type="button" name="Update Permissions" value="Update Permissions"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
 
 	<div tal:condition="python: blankForm">
-		<p>Either no users have been added or no clusters or storage systems are being managed by Luci.</p>
+		<p>Either no users have been added or no clusters
+			or storage systems are managed by Luci.</p>
 	</div>
 </div>
 
@@ -287,93 +291,191 @@
 		set_page_title('Luci ??? homebase ??? Remove a system or cluster from Luci');
 	</script>
 
-	<span tal:omit-tag=""
-		tal:define="global systems python:here.getSystems();
-					global blankForm python:1;
-					global num_clusters python:-1;
-					global num_systems python:-1"
-	/>
-
 	<h2 class="homebase">Manage Systems and Clusters</h2>
 
 	<h3>Authenticate to Storage or Cluster Systems</h3>
 
 	<form name="authform" method="post" action="">
-		<input type="hidden" name="pagetype" value="8" />
-		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
-			<thead class="systemsTable">
-				<tr class="systemsTable">
-					<th class="systemsTable">System Hostname</th>
-					<th class="systemsTable">Root Password</th>
-				</tr>
-			</thead>
+		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0"
+			tal:define="
+				new_systems request/SESSION/auth_systems | nothing;
+				global cur_sysnum python: 1">
 
-			<tfoot class="systemsTable">
-				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<div id="allSameDiv" class="invisible">
-						<input type="checkbox" class="allSameCheckBox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(this.form);"/><span>Check if storage system passwords are identical.</span>
-					</div>
-				</td></tr>
-
-				<tr class="systemsTable"><td class="systemsTable" colspan="2">
-					<div class="systemsTableEnd">
-						<input type="button" value="Add another entry" onClick="addSystem(this.form);" />
-					</div>
-				</td></tr>
-			</tfoot>
+			<tal:block tal:condition="not: new_systems">
+				<thead class="systemsTable">
+					<tr class="systemsTable">
+						<th class="systemsTable">System Hostname</th>
+						<th class="systemsTable">Root Password</th>
+						<th class="systemsTable"></th>
+					</tr>
+				</thead>
+			
+				<tbody class="systemsTable">
+					<tr class="systemsTable" id="__SYSTEM_ROW_0">
+						<td class="systemsTable">
+							<input class="hbInputSys" type="text"
+								id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+						</td>
+						<td class="systemsTable">
+							<input type="password"
+								autocomplete="off"
+								id="__SYSTEM0:Passwd"
+								name="__SYSTEM0:Passwd"
+								class="hbInputPass"
+								onChange="pwd0Change(this.form)" />
+						</td>
+						<td class="systemsTable">
+							<img src="/luci/delete-row.png" class="deleteRow"
+								title="delete this row"
+								onclick="delete_element_id('__SYSTEM_ROW_0')" />
+						</td>
+					</tr>
+				</tbody>
+			</tal:block>
 
-			<tbody class="systemsTable">
-				<tr class="systemsTable">
-					<td class="systemsTable">
-						<input class="hbInputSys" type="text"
-							id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
-					</td>
-					<td class="systemsTable">
-						<input type="password" autocomplete="off"
-							id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
-							class="hbInputPass"
-							onChange="pwd0Change(this.form);" />
-					</td>
-				</tr>
+			<tal:block tal:condition="new_systems">
+				<thead class="systemsTable">
+					<tr class="systemsTable">
+						<th class="systemsTable">System Hostname</th>
+						<th class="systemsTable">Root Password</th>
+						<th class="systemsTable">Key ID</th>
+						<th class="systemsTable">Trust</th>
+						<th class="systemsTable"></th>
+					</tr>
+				</thead>
+				<tal:block tal:define="global cur_sysnum python: 0" />
 
-				<tal:block tal:define="global numsys python: 0" />
+				<tbody class="systemsTable">
+				<tal:block tal:repeat="cur_sys new_systems">
+					<tr class="systemsTable"
+						tal:attributes="
+							id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+						tal:define="sys python: new_systems[cur_sys]">
 
-				<tal:block tal:repeat="s python:systems[2]">
-					<tal:block tal:define="global numsys python: numsys + 1" />
-					<tr class="systemsTable">
 						<td class="systemsTable">
 							<input class="hbInputSys" type="text"
 								tal:attributes="
-									id python: '__SYSTEM' + str(numsys) + ':Addr';
-									name python: '__SYSTEM' + str(numsys) + ':Addr'" />
+									id python: '__SYSTEM%d:Addr' % cur_sysnum;
+									name python: '__SYSTEM%d:Addr' % cur_sysnum;
+									value sys/host | nothing" />
 						</td>
-
 						<td class="systemsTable">
-							<input type="password" autocomplete="off"
-								onChange="pwd0Change(this.form);"
+							<input type="password"
+								autocomplete="off"
 								class="hbInputPass"
+								onChange="pwd0Change(this.form)"
+								tal:attributes="
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									value sys/passwd | nothing" />
+						</td>
+						<td class="systemsTable">
+							<img 
 								tal:attributes="
-									id python: '__SYSTEM' + str(numsys) + ':Password';
-									name python: '__SYSTEM' + str(numsys) + ':Password'" />
+									src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+									title sys/fp | string:no key fingerprint available"
+							/>
+							<input type="hidden"
+								tal:attributes="
+									id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+									name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+									value sys/fp | nothing" />
+						</td>
+						<td class="systemsTable">
+							<input type="checkbox" checked tal:attributes="
+								id python: '__SYSTEM%dTrusted' % cur_sysnum;
+								name python: '__SYSTEM%dTrusted' % cur_sysnum;
+								disabled python: 'trusted' in sys"
+							/>
+						</td>
+						<td class="systemsTable">
+							<img src="/luci/delete-row.png" class="deleteRow"
+								title="delete this row"
+								tal:attributes="onclick python:'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
 						</td>
 					</tr>
+					<tal:block
+						tal:define="global cur_sysnum python: cur_sysnum + 1" />
 				</tal:block>
 			</tbody>
+			<tal:block
+				tal:define="
+					x python: request.SESSION.delete('auth_systems')" />
+			</tal:block>
+
+			<tfoot class="systemsTable">
+				<tr class="systemsTable"><td colspan="2" class="systemsTable">
+					<ul class="vanilla">
+						<li class="vanilla"><input name="check_certs" type="checkbox">View system certificates before sending any passwords.</li>
+						<li class="vanilla"
+							tal:attributes="id python: cur_sysnum < 2 and 'allSameDiv' or ''">
+							<input type="checkbox" name="allSameCheckBox"
+								id="allSameCheckBox" onClick="allPasswdsSame(this.form)" />
+							Check if system passwords are identical.
+						</li>
+					</ul>
+				</td></tr>
+
+				<tr class="systemsTable"><td class="systemsTable" colspan="2">
+					<div class="systemsTableEnd">
+						<input type="button" value="Add another entry"
+							onClick="addSystem(this.form)" />
+					</div>
+				</td></tr>
+			</tfoot>
 		</table>
 
-		<input type="hidden" name="numStorage" value="1" />
+		<input name="numStorage" id="numStorage" type="hidden"
+			tal:attributes="value cur_sysnum | string:1" />
+
+		<input type="hidden" name="pagetype" value="8" />
 
 		<div class="hbSubmit" id="hbSubmit">
 			<input type="button" name="Submit" value="Submit"
 				onClick="validateAuth(this.form)" />
 		</div>
+
+		<tal:block tal:condition="exists: request/SESSION/auth_status">
+			<div class="retmsgs" id="auth_retmsgsdiv"
+				tal:condition="exists: request/SESSION/auth_status/messages">
+				<div class="hbclosebox">
+					<a href="javascript:hide_element('auth_retmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
+				</div>
+				<p class="retmsgs">Status messages:</p>
+				<ul class="statusmsg">
+					<tal:block tal:repeat="e request/SESSION/auth_status/messages">
+						<li class="statusmsg" tal:content="e" />
+					</tal:block>
+				</ul>
+			</div>
+			<div class="errmsgs" id="auth_errmsgsdiv"
+				tal:condition="exists: request/SESSION/auth_status/errors">
+				<div class="hbclosebox">
+					<a href="javascript:hide_element('auth_errmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
+				</div>
+				<p class="errmsgs">The following errors occurred:</p>
+				<ul class="statusmsg">
+					<tal:block tal:repeat="e request/SESSION/auth_status/errors">
+						<li class="statusmsg" tal:content="e" />
+					</tal:block>
+				</ul>
+			</div>
+			<tal:block
+				tal:define="x python: request.SESSION.delete('auth_status')" />
+			<div class="padding">&nbsp;</div>
+		</tal:block>
 	</form>
 
+	<tal:block tal:define="
+		global systems python:here.getSystems();
+		global blankForm python:1;
+		global num_clusters python:-1;
+		global num_systems python:-1" />
+
 	<form name="adminform" method="post" action=""
 		tal:condition="python:(systems[0] and len(systems[0]) > 0) or (systems[1] and len(systems[1]) > 0)">
 
-		<span tal:omit-tag="" tal:define="global blankForm python:0" />
-
+		<tal:block tal:define="global blankForm python:0" />
 
 		<input type="hidden" name="pagetype"
 			tal:attributes="value request/form/pagetype | request/pagetype | nothing" />
@@ -425,19 +527,20 @@
 			</tal:block>
 		</div>
 
-		<input type="hidden" id="numStorage"
+		<input type="hidden" id="num_storage"
 			tal:attributes="value python: num_systems + 1" />
 
-		<input type="hidden" id="numClusters"
+		<input type="hidden" id="num_clusters"
 			tal:attributes="value python: num_clusters + 1" />
 
-		<div class="hbSubmit" id="hbSubmit">
-			<input type="button" name="Submit" value="Remove Selected Systems" onClick="validateForm(document.adminform);" />
+		<div class="hbSubmit">
+			<input type="button" name="Submit" value="Remove selected entries"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
 
 	<div tal:condition="python: blankForm">
-		<p>No clusters or storage systems are currently being managed by Luci.</p>
+		<p>No clusters or storage systems are currently managed by Luci.</p>
 	</div>
 </div>
 
@@ -476,51 +579,141 @@
 		<input name="pagetype" type="hidden"
 			tal:attributes="value request/form/pagetype | request/pagetype | nothing" />
 
-		<input name="numStorage" id="numStorage" type="hidden" value="1" />
-
 		<input name="absoluteURL" type="hidden"
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
 
-		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
-			<thead class="systemsTable">
-				<tr class="systemsTable">
-					<th class="systemsTable">System Hostname</th>
-					<th class="systemsTable">Root Password</th>
-				</tr>
-			</thead>
+		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0"
+			tal:define="
+				new_systems request/SESSION/add_systems | nothing;
+				global cur_sysnum python: 1">
+
+			<tal:block tal:condition="not: new_systems">
+				<thead class="systemsTable">
+					<tr class="systemsTable">
+						<th class="systemsTable">System Hostname</th>
+						<th class="systemsTable">Root Password</th>
+						<th class="systemsTable"></th>
+					</tr>
+				</thead>
+			
+				<tbody class="systemsTable">
+					<tr class="systemsTable" id="__SYSTEM_ROW_0">
+						<td class="systemsTable">
+							<input class="hbInputSys" type="text"
+								id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+						</td>
+						<td class="systemsTable">
+							<input type="password"
+								autocomplete="off"
+								id="__SYSTEM0:Passwd"
+								name="__SYSTEM0:Passwd"
+								class="hbInputPass"
+								onChange="pwd0Change(this.form)" />
+						</td>
+						<td class="systemsTable">
+							<img src="/luci/delete-row.png" class="deleteRow"
+								title="delete this row"
+								onclick="delete_element_id('__SYSTEM_ROW_0')" />
+						</td>
+					</tr>
+				</tbody>
+			</tal:block>
+
+			<tal:block tal:condition="new_systems">
+				<thead class="systemsTable">
+					<tr class="systemsTable">
+						<th class="systemsTable">System Hostname</th>
+						<th class="systemsTable">Root Password</th>
+						<th class="systemsTable">Key ID</th>
+						<th class="systemsTable">Trust</th>
+						<th class="systemsTable"></th>
+					</tr>
+				</thead>
+				<tal:block tal:define="global cur_sysnum python: 0" />
+
+				<tbody class="systemsTable">
+				<tal:block tal:repeat="cur_sys new_systems">
+					<tr class="systemsTable"
+						tal:attributes="
+							id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+						tal:define="sys python: new_systems[cur_sys]">
+
+						<td class="systemsTable">
+							<input class="hbInputSys" type="text"
+								tal:attributes="
+									id python: '__SYSTEM%d:Addr' % cur_sysnum;
+									name python: '__SYSTEM%d:Addr' % cur_sysnum;
+									value sys/host | nothing" />
+						</td>
+						<td class="systemsTable">
+							<input type="password"
+								autocomplete="off"
+								class="hbInputPass"
+								onChange="pwd0Change(this.form)"
+								tal:attributes="
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									value sys/passwd | nothing" />
+									
+						</td>
+						<td class="systemsTable">
+							<img 
+								tal:attributes="
+									src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+									title sys/fp | string:no key fingerprint available"
+							/>
+							<input type="hidden"
+								tal:attributes="
+									id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+									name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+									value sys/fp | nothing" />
+						</td>
+						<td class="systemsTable">
+							<input type="checkbox" checked tal:attributes="
+								id python: '__SYSTEM%dTrusted' % cur_sysnum;
+								name python: '__SYSTEM%dTrusted' % cur_sysnum;
+								disabled python: 'trusted' in sys"
+							/>
+						</td>
+						<td class="systemsTable">
+							<img src="/luci/delete-row.png" class="deleteRow"
+								title="delete this row"
+								tal:attributes="onclick python:'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
+						</td>
+					</tr>
+					<tal:block
+						tal:define="global cur_sysnum python: cur_sysnum + 1" />
+				</tal:block>
+			</tbody>
+			<tal:block
+				tal:define="
+					x python: request.SESSION.delete('add_systems')" />
+			</tal:block>
 
 			<tfoot class="systemsTable">
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<div id="allSameDiv">
-						<input type="checkbox" class="allSameCheckBox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/><span>Check if storage system passwords are identical.</span>
-					</div>
+					<ul class="vanilla">
+						<li class="vanilla"><input name="check_certs" type="checkbox">View system certificates before sending any passwords.</li>
+						<li class="vanilla"
+							tal:attributes="id python: cur_sysnum < 2 and 'allSameDiv' or ''"><input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(this.form);"/>Check if storage system passwords are identical.</li>
+					</ul>
 				</td></tr>
 
 				<tr class="systemsTable"><td class="systemsTable" colspan="2">
 					<div class="systemsTableEnd">
-						<input type="button" value="Add another entry" onClick="addSystem(adminform);" />
+						<input type="button" value="Add another entry"
+							onClick="addSystem(this.form)" />
 					</div>
 				</td></tr>
 			</tfoot>
-
-			<tbody class="systemsTable">
-				<tr class="systemsTable">
-					<td class="systemsTable">
-						<input class="hbInputSys" type="text"
-							id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
-					</td>
-					<td class="systemsTable">
-						<input type="password" autocomplete="off"
-							id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
-							class="hbInputPass"
-							onChange="pwd0Change(adminform);" />
-					</td>
-				</tr>
-			</tbody>
 		</table>
 
+		<input name="numStorage" id="numStorage" type="hidden"
+			tal:attributes="value cur_sysnum | string:1" />
+
 		<div class="hbSubmit" id="hbSubmit">
-			<input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+			<input type="button" name="Submit" value="Submit"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
 </div>
@@ -550,95 +743,147 @@
 		set_page_title('Luci - homebase - Add a running cluster to be managed by Luci');
 	</script>
 
-	<tal:block tal:define="
-		global sessionObj python:request.SESSION.get('checkRet')" />
-
 	<h2 class="homebase">Add Cluster</h2>
 
+	<tal:block tal:define="
+		global add_cluster request/SESSION/add_cluster | nothing" />
+
 	<form name="adminform" action="" method="post"
-		tal:condition="python: sessionObj and len(sessionObj)">
+		tal:condition="add_cluster">
+
 		<input name="pagetype" type="hidden"
 			tal:attributes="value request/form/pagetype | request/pagetype | nothing" />
 
 		<input name="absoluteURL" type="hidden"
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
 
+		<input name="pass" type="hidden"
+			tal:attributes="value add_cluster/pass | string:0" />
+
+		<input name="cluster_os" type="hidden"
+			tal:attributes="value add_cluster/cluster_os | string:rhel5" />
+
 		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
 			<thead class="systemsTable">
 				<tr class="systemsTable"><td class="systemsTable" colspan="2">
 					<div class="systemsTableTop">
-						<strong>Cluster Name:</strong> <span tal:replace="python: sessionObj['requestResults']['clusterName']" />
-						<input type="hidden" type="text" id="clusterName" name="clusterName" tal:attributes="value python: sessionObj['requestResults']['clusterName']" />
+						<strong class="cluster_name">Cluster Name:
+							<span tal:replace="add_cluster/name | string:[unknown]" />
+						</strong>
+						<input type="hidden" id="clusterName" name="clusterName"
+							tal:attributes="value add_cluster/name | nothing" />
 					</div>
 				</td></tr>
+
 				<tr class="systemsTable">
 					<th class="systemsTable">Node Hostname</th>
 					<th class="systemsTable">Root Password</th>
+					<th class="systemsTable">Key ID</th>
+					<th class="systemsTable">Trust</th>
 				</tr>
+
 			</thead>
 
 			<tfoot class="systemsTable">
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<div tal:condition="python: not 'isComplete' in sessionObj['requestResults'] or not sessionObj['requestResults']['isComplete'] or ('errors' in sessionObj and len(sessionObj['errors']) > 0)">
-						<input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if cluster node passwords are identical.
-					</div>
-					<div class="systemsTable"
-						tal:condition="python: 'isComplete' in sessionObj['requestResults'] and sessionObj['requestResults']['isComplete']">&nbsp;</div>
+					<ul class="vanilla">
+						<li class="vanilla">
+							<input name="check_certs" type="checkbox"
+								tal:attributes="checked python: add_cluster['check_certs'] and 'checked'" />
+							View system certificates before sending any passwords.
+						</li>
+						<li class="vanilla" id="allSameDiv"
+							tal:condition="not: exists: add_cluster/complete">
+							<input type="checkbox" name="allSameCheckBox"
+								id="allSameCheckBox" onClick="allPasswdsSame(this.form)"
+								tal:attributes="checked python: add_cluster['identical_passwds'] and 'checked'"
+							/>
+							
+							Check if node passwords are identical.
+						</li>
+						<li class="vanilla" tal:condition="python: add_cluster['pass'] > 0 and 'incomplete' in add_cluster">
+							<input type="checkbox" name="asis">
+							Add the cluster to Luci as-is.<br>
+							Any nodes that are not authenticated will need to be authenticated later.
+						</li>
+					</ul>
+					<br/>
 				</td></tr>
 			</tfoot>
 
-			<span tal:omit-tag=""
-				tal:define="global sysNum python: 0"
-			/>
-
-			<tbody class="systemsTable" tal:condition="python: 'nodeList' in sessionObj['requestResults']">
-			<tal:block tal:repeat="node python: sessionObj['requestResults']['nodeList']">
-				<span tal:omit-tag=""
-					tal:define="global nodeAuth python: node['cur_auth']" />
+			<tal:block tal:define="global cur_sysnum python:0" />
 
-				<tr class="systemsTable">
+			<tbody class="systemsTable" tal:condition="add_cluster/nodes">
+			 <tal:block tal:repeat="cur_sys add_cluster/nodes">
+				<tr class="systemsTable"
+					tal:define="sys python: add_cluster['nodes'][cur_sys]">
 					<td class="systemsTable">
 						<input type="text"
 							tal:attributes="
-								id python: '__SYSTEM' + str(sysNum) + ':Addr';
-								name python: '__SYSTEM' + str(sysNum) + ':Addr';
-								value python: node['host'];
-								class python: 'hbInputSys' + ('errors' in node and ' error' or '');
-								disabled python: (nodeAuth and node['host'].count('.') > 0) and 1 or 0"
+								value sys/host | nothing;
+								id python: '__SYSTEM%d:Addr' % cur_sysnum;
+								name python: '__SYSTEM%d:Addr' % cur_sysnum;
+								class python: 'hbInputSys' + ('errors' in sys and ' error' or '');
+								disabled python: ('auth' in sys and sys['host'].count('.') > 0) and 1 or 0"
 						 />
 					</td>
 					<td class="systemsTable">
-						<input onChange="pwd0Change(adminform);"
+						<tal:block tal:condition="not: exists: sys/auth">
+							<input type="password"
+								autocomplete="off"
+								onChange="pwd0Change(this.form)"
+								tal:attributes="
+									value sys/passwd | nothing;
+									class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+						</tal:block>
+
+						<tal:block tal:condition="exists: sys/auth">
+							<input type="text" onChange="pwd0Change(this.form)"
+								disabled="disabled" value="[authenticated]"
+								tal:attributes="
+									class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+									id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+									name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+						</tal:block>
+					</td>
+					<td class="systemsTable">
+						<img 
 							tal:attributes="
-								type python: nodeAuth and 'text' or 'password';
-								value python: nodeAuth and '[authenticated]' or '';
-								class python: 'hbInputPass' + ('errors' in node and ' error' or '');
-								id python: '__SYSTEM' + str(sysNum) + ':Passwd';
-								name python: '__SYSTEM' + str(sysNum) + ':Passwd';
-								disabled python: nodeAuth and 1 or 0"
+								src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+								title sys/fp | string:no key fingerprint available" />
+						<input type="hidden"
+							tal:attributes="
+								id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+								name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+								value sys/fp | nothing" />
+					</td>
+					<td class="systemsTable">
+						<input type="checkbox" tal:attributes="
+							checked python: add_cluster['pass'] > 0;
+							id python: '__SYSTEM%dTrusted' % cur_sysnum;
+							name python: '__SYSTEM%dTrusted' % cur_sysnum;
+							disabled python: 'trusted' in sys"
 						/>
 					</td>
 				</tr>
-				<span tal:omit-tag=""
-					tal:define="global sysNum python: sysNum + 1"
-				/>
-			</tal:block>
+				<tal:block tal:define="global cur_sysnum python: cur_sysnum + 1" />
+			 </tal:block>
 			</tbody>
 		</table>
 
 		<input name="numStorage" id="numStorage" type="hidden"
-			tal:attributes="value python: sysNum" />
+			tal:attributes="value cur_sysnum" />
 
 		<div class="hbSubmit" id="hbSubmit">
-			<input type="button" name="Submit" value="Add This Cluster" onClick="validateForm(document.adminform);" />
+			<input type="button" name="Submit" value="Add This Cluster"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
 
-	<div tal:condition="python: not sessionObj or not len(sessionObj)">
-		<span class="error">
-			A data integrity error has occurred. Please attempt to add this cluster to the Luci management interface again.
-		</span>
-		<tal:block tal:define="nop python:here.abortManageCluster(request)" />
+	<div tal:condition="add_cluster">
+		<tal:block tal:define="x python: request.SESSION.delete('add_cluster')" />
 	</div>
 </div>
 
@@ -675,19 +920,26 @@
 
 		<p class="hbText">Enter one node from the cluster you wish to add to the Luci management interface.</p>
 
-		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
+		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0"
+			tal:define="cur_sys request/SESSION/add_cluster_initial | nothing">
+
 			<thead class="systemsTable">
 				<tr class="systemsTable">
 					<th class="systemsTable">System Hostname</th>
 					<th class="systemsTable">Root Password</th>
+					<tal:block tal:condition="cur_sys">
+						<th>Key Id</th>
+						<th>Trust</th>
+					</tal:block>
 				</tr>
 			</thead>
 
 			<tfoot class="systemsTable">
 				<tr class="systemsTable"><td colspan="2" class="systemsTable">
-					<div class="hbcheckdiv">
-						<input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" /> Attempt to authenticate to all cluster nodes using the password provided above.
-					</div>
+					<ul class="vanilla">
+						<li class="vanilla"><input name="check_certs" type="checkbox">View system certificates before sending any passwords.</li>
+						<li class="vanilla"><input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(this.form);"/>Authenticate to all cluster nodes using the password provided above.</li>
+					</ul>
 				</td></tr>
 			</tfoot>
 
@@ -695,21 +947,42 @@
 				<tr class="systemsTable">
 					<td class="systemsTable">
 						<input class="hbInputSys" type="text"
-							id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+							id="__SYSTEM0:Addr" name="__SYSTEM0:Addr"
+							tal:attributes="
+								value cur_sys/host | nothing" />
 					</td>
 					<td class="systemsTable">
-						<input type="password" autocomplete="off"
+						<input class="hbInputPass" type="password"
+							onChange="pwd0Change(this.form)"
+							autocomplete="off"
 							id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
-							class="hbInputPass" />
+							tal:attributes="
+								value cur_sys/passwd | nothing" />
 					</td>
+					<tal:block tal:condition="cur_sys">
+						<td class="systemsTable">
+							<img tal:attributes="
+								title cur_sys/fp | string:no key fingerprint available;
+								src python: 'trusted' in cur_sys and '/luci/lock-ok.png' or ('fp' in cur_sys and '/luci/lock-closed.png' or '/luci/lock-open.png')"
+							/>
+						</td>
+						<td class="systemsTable">
+							<input type="checkbox" name="host_is_trusted" checked="checked" />
+						</td>
+					</tal:block>
 				</tr>
 			</tbody>
+			<tal:block tal:condition="cur_sys">
+				<tal:block
+					tal:define="x python: request.SESSION.delete('add_cluster_initial')" />
+			</tal:block>
 		</table>
 
 		<input type="hidden" name="numStorage" value="1" />
 
 		<div class="hbSubmit" id="hbSubmit">
-			<input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+			<input type="button" name="Submit" value="Submit"
+				onClick="validateForm(this.form)" />
 		</div>
 	</form>
 </div>
--- conga/luci/homebase/homebase_common.js	2006/11/03 19:13:57	1.14
+++ conga/luci/homebase/homebase_common.js	2006/12/21 05:08:48	1.15
@@ -139,22 +139,40 @@
 	var num_systems = form.numStorage.value;
 
 	var state = cb.checked;
-	var passwd = document.getElementById('__SYSTEM0:Passwd');
-	if (!passwd || passwd.type != 'password')
+
+	var first_passwd = null;
+	var first_system = 0;
+	for (var i = 0 ; i < num_systems ; i++) {
+		var passwd = document.getElementById('__SYSTEM' + i + ':Passwd');
+		if (!passwd || passwd.type != 'password')
+			continue;
+		first_passwd = passwd.value;
+		first_system = i;
+		break;
+	}
+
+	if (first_passwd === null)
 		return (-1);
-	passwd = passwd.value;
-	if (!passwd || !state)
-		passwd = '';
 
-	for (var i = 1 ; i < num_systems ; i++) {
+	if (!first_passwd || !state)
+		first_passwd = '';
+
+	for (var i = first_system + 1 ; i < num_systems ; i++) {
 		var element = document.getElementById('__SYSTEM' + i + ':Passwd');
 		if (element && element.type == 'password') {
-			element.value = passwd;
+			element.value = first_passwd;
 			element.disabled = state;
 		}
 	}
 }
 
+function delete_element_id(id_str) {
+	var elem = document.getElementById(id_str);
+	if (!elem || !elem.parentNode)
+		return (-1);
+	elem.parentNode.removeChild(elem);
+}
+
 function pwd0Change(form) {
 	var element = document.getElementById('allSameCheckBox');
 	if (element && element.checked)
@@ -182,23 +200,43 @@
 	newsysp.setAttribute('value', '');
 	newsysp.setAttribute('autocomplete', 'off');
 
+	var first_passwd = '';
+	for (var i = 0 ; i < num_systems - 1 ; i++) {
+		var pwd = document.getElementById('__SYSTEM' + i + ':Passwd');
+		if (!pwd || pwd.type != 'password')
+			continue;
+		first_passwd = pwd.value;
+		break;
+	}
+
 	var allSameCB = document.getElementById('allSameCheckBox');
 	if (allSameCB && allSameCB.checked) {
-		newsysp.setAttribute('value', document.getElementById('__SYSTEM0:Passwd').value);
+		newsysp.setAttribute('value', first_passwd);
 		newsysp.setAttribute('disabled', true);
 	}
 
 	var newrow = document.createElement('tr');
+	newrow.setAttribute('id', '__SYSTEM_ROW_' + num_systems);
 	newrow.className = 'systemsTable';
+
 	var hcol = document.createElement('td');
 	hcol.className = 'systemsTable';
 	var pcol = document.createElement('td');
 	pcol.className = 'systemsTable';
+	var dcol = document.createElement('td');
+	dcol.className = 'systemsTable';
+	var del_img = document.createElement('img');
+	del_img.src = '/luci/delete-row.png';
+	del_img.title = 'delete this row';
+	del_img.className = 'deleteRow';
+	del_img.setAttribute('onClick', 'delete_element_id(\'' + newrow.id + '\')');
+	dcol.appendChild(del_img);
 
 	hcol.appendChild(newsys);
 	pcol.appendChild(newsysp);
 	newrow.appendChild(hcol);
 	newrow.appendChild(pcol);
+	newrow.appendChild(dcol);
 	sltab.appendChild(newrow);
 
 	form.numStorage.value = ++num_systems;
--- conga/luci/homebase/index_html	2006/11/01 23:04:17	1.20
+++ conga/luci/homebase/index_html	2006/12/21 05:08:48	1.21
@@ -134,23 +134,24 @@
 
 		<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
 			<div class="hbclosebox">
-				<a href="javascript:hide_element('retmsgsdiv');"><img src="x.png"></a>
+				<a href="javascript:hide_element('retmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
 			</div>
-			<ul class="retmsgs">
+			<p class="retmsgs">Status messages:</p>
+			<ul class="statusmsg">
 				<tal:block tal:repeat="e python:ret['messages']">
-					<li class="retmsgs" tal:content="python:e" />
+					<li class="statusmsg" tal:content="python:e" />
 				</tal:block>
 			</ul>
 		</div>
 
 		<div id="errmsgsdiv" class="errmsgs" tal:condition="python:(ret and 'errors' in ret and len(ret['errors']))">
 			<div class="hbclosebox">
-				<a class="hbclosebox" href="javascript:hide_element('errmsgsdiv');"><img src="x.png"></a>
+				<a class="hbclosebox" href="javascript:hide_element('errmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
 			</div>
 			<p class="errmsgs">The following errors occurred:</p>
-			<ul class="errmsgs">
+			<ul class="statusmsg">
 				<tal:block tal:repeat="e python:ret['errors']">
-					<li class="errmsgs" tal:content="python:e" />
+					<li class="statusmsg" tal:content="python:e" />
 				</tal:block>
 			</ul>
 		</div>
--- conga/luci/homebase/luci_homebase.css	2006/12/01 14:56:54	1.30
+++ conga/luci/homebase/luci_homebase.css	2006/12/21 05:08:48	1.31
@@ -48,6 +48,8 @@
 }
 
 input.qdisk {
+	font-family: "Bitstream Vera Sans Mono", "DejaVu Sans Mono", monospace ! important;
+	font-size: 12px ! important;
 	padding: .2em;
 }
 
@@ -81,6 +83,10 @@
 	margin-left: 0 ! important;
 }
 
+ul.statusmsg, li.statusmsg {
+	color: black ! important;
+}
+
 ul.deploy {
 	margin-bottom: +.5em;
 }
@@ -153,7 +159,7 @@
 	color: green !important;
 }
 
-p.errmsgs {
+p.errmsgs, p.retmsgs {
 	font-weight: 800;
 }
 
@@ -163,7 +169,7 @@
 	border-width: 2px;
 	border-color: red;
 	margin-top: 2em;
-	max-width: 700px;
+	max-width: 600px ! important;
 }
 
 div.retmsgs {
@@ -172,7 +178,7 @@
 	border-style: dotted;
 	border-width: 2px;
 	border-color: green;
-	max-width: 700px;
+	max-width: 600px ! important;
 }
 
 div.hbCSystems {
@@ -208,6 +214,7 @@
 table.systemsTable {
 	padding-left: +.5em;
 	background: #dee7ec;
+	max-width: 700px;
 }
 
 td.systemsTable {
@@ -250,8 +257,6 @@
 }
 
 img.qdel_img {
-	height: 7px;
-	width: 7px;
 	background: #dee7ec;
 	border: none;
 }
@@ -415,6 +420,10 @@
 	color: blue ! important;
 }
 
+img.deleteRow, img.closeBox {
+	cursor: pointer;
+}
+
 *.running,
 *.node_active {
 	color: green ! important;
--- conga/luci/homebase/validate_sys_remove.js	2006/10/16 20:46:46	1.2
+++ conga/luci/homebase/validate_sys_remove.js	2006/12/21 05:08:48	1.3
@@ -6,7 +6,7 @@
 	if (!form)
 		return (-1);
 
-	var num_clusters = document.getElementById('numClusters').value;
+	var num_clusters = document.getElementById('num_clusters').value;
 	for (var i = 0 ; i < num_clusters ; i++) {
 		var element = document.getElementById('__CLUSTER' + i);
 		if (!element || !element.value || !element.checked)
@@ -14,7 +14,7 @@
 		selected_clusters.push(element.value);
 	}
 
-	var num_storage = document.getElementById('numStorage').value;
+	var num_storage = document.getElementById('num_storage').value;
 	for (var i = 0 ; i < num_storage ; i++) {
 		var element = document.getElementById('__SYSTEM' + i);
 		if (!element || !element.value || !element.checked)
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/20 22:06:49	1.190
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/21 05:08:49	1.191
@@ -5,7 +5,6 @@
 from conga_constants import *
 from ricci_bridge import *
 from ricci_communicator import RicciCommunicator, RicciError, batch_status, extract_module_status
-from string import lower
 import time
 import Products.ManagedSystem
 from Products.Archetypes.utils import make_uuid
@@ -24,9 +23,9 @@
 from QuorumD import QuorumD
 from Heuristic import Heuristic
 from clusterOS import resolveOSType
-from FenceHandler import validateNewFenceDevice, FENCE_OPTS
+from FenceHandler import validateNewFenceDevice, FENCE_OPTS, validateFenceDevice
 from GeneralError import GeneralError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode, delCluster
+from homebase_adapters import manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode, delCluster, parseHostForm
 from LuciSyslog import LuciSyslog
 
 #Policy for showing the cluster chooser menu:
@@ -42,391 +41,519 @@
 except:
 	pass
 
-def validateClusterNodes(request, sessionData, clusterName, numStorage):
-	nodeList = list()
-	nodeHash = {}
-	rnodeHash = {}
-	oldNodeHash = {}
-	oldRnodeHash = {}
-	requestResults = {}
-	errors = list()
+def buildClusterCreateFlags(self, batch_map, clusterName):
+	path = str(CLUSTER_FOLDER_PATH + clusterName)
 
-	if sessionData and 'requestResults' in sessionData:
-		requestResults = sessionData['requestResults']
-		if 'nodeHash' in requestResults:
-			oldNodeHash = requestResults['nodeHash']
-		if 'rnodeHash' in requestResults:
-			oldRnodeHash = requestResults['rnodeHash']
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+	except Exception, e:
+		luci_log.debug_verbose('buildCCF0: no cluster folder at %s' % path)
+		return None
 
-	i = 0
-	while i < numStorage:
+	for key in batch_map.keys():
 		try:
-			sysData = request.form['__SYSTEM' + str(i)]
-			if not sysData or sysData[0] == '':
-				raise
-
-			if len(sysData) < 2 or sysData[1] == '':
-				errors.append('No password was specified for host \"' + sysData[0] + '\"')
-				raise
-		except:
-			i += 1
-			continue
-
-		if len(sysData) > 1:
-			node = nodeAuth(None, sysData[0], sysData[1])
-
-			if oldRnodeHash and node['ricci_host'] in oldRnodeHash:
-				oldNode = oldRnodeHash[node['ricci_host']]
-			elif oldNodeHash and node['host'] in nodeHash:
-				oldNode = oldNodeHash[node['host']]
-			else:
-				oldNode = None
-
-			if 'errors' in node:
-				errors.append(node['errors'])
-				node['errors'] = True
-
-			if node['host'] in nodeHash or node['ricci_host'] in rnodeHash:
-				node['errors'] = True
-				errors.append('You added the node \"' + node['host'] + '\" more than once')
-			else:
-				if oldNode and 'prev_auth' in oldNode:
-					node['prev_auth'] = oldNode['prev_auth']
-
-				nodeHash[node['host']] = node
-				rnodeHash[node['ricci_host']] = node
-				nodeList.append(node)
-		i += 1
-
-	sfn = lambda x, y: \
-		x['cur_auth'] - y['cur_auth'] or (('errors' in y) - ('errors' in x))
-	nodeList.sort(sfn)
-
-	dfn = lambda x: not 'cur_auth' in x or x['cur_auth'] != True
-	cluster_properties = {
-		'clusterName': clusterName,
-		'nodeList': nodeList,
-		'nodeHash': nodeHash,
-		'rnodeHash': rnodeHash,
-		'isComplete': len(errors) < 1 and len(filter(dfn, nodeList)) == 0
-	}
-
-	return [errors, cluster_properties]
-
+			key = str(key)
+			batch_id = str(batch_map[key])
+			#This suffix needed to avoid name collision
+			objname = str(key + "____flag")
 
-def validateCreateCluster(self, request):
-	errors = list()
-	requestResults = {}
+			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#now designate this new object properly
+			objpath = str(path + "/" + objname)
+			flag = self.restrictedTraverse(objpath)
 
-	if not havePermCreateCluster(self):
-		return (False, {'errors': ['You do not have sufficient rights to create a cluster.']})
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
+			flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
+			flag.manage_addProperty(LAST_STATUS, 0, "int")
+		except Exception, e:
+			luci_log.debug_verbose('buildCCF1: error creating flag for %s: %s' \
+				% (key, str(e)))
 
+def parseClusterNodes(self, request, cluster_os):
+	check_certs = False
 	try:
-	 	sessionData = request.SESSION.get('checkRet')
+		check_certs = 'check_certs' in request.form
 	except:
-		sessionData = None
-
-	if not 'clusterName' in request.form or not request.form['clusterName']:
-		return (False, {'errors': [ 'No cluster name was specified.' ]})
-	clusterName = request.form['clusterName']
+		check_certs = False
 
+	download_pkgs = 1
 	try:
-		numStorage = int(request.form['numStorage'])
+		download_pkgs = int(request.form['download_pkgs'].strip())
 	except:
-		return (False, { 'errors': ['Unknown number of systems entered'], 'requestResults': requestResults })
-
-	if numStorage < 1:
-		return (False, { 'errors': ['A cluster must contain at least one node'], 'requestResults': requestResults })
-
-	ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
-	errors.extend(ret[0])
-	cluster_properties = ret[1]
+		download_pkgs = 1
 
-	rhn_dl = 1
+	clusterName = None
 	try:
-		rhn_dls = request.form['rhn_dl'].strip().lower()
-		if rhn_dls != '1' and rhn_dls != 'true':
-			rhn_dl = 0
+		clusterName = str(request.form['clusterName'])
 	except:
-		rhn_dl = 0
+		clusterName = None
+
+	if clusterName is None:
+		luci_log.debug_verbose('PCN0: no cluster name was given')
+		return (False, { 'errors': [ 'No cluster name was given.' ]})
 
-	enable_storage = 0
+	shared_storage = False
 	try:
-		enable_storage_str = request.form['enable_storage'].strip().lower()
-		if enable_storage_str:
-			enable_storage = 1
+		shared_storage = request.form.has_key('enable_storage')
 	except:
-		enable_storage = 0
+		shared_storage = False
 
+	same_node_passwds = False
 	try:
-		nodeList = cluster_properties['nodeList']
-		if len(nodeList) < 1:
-			raise
+		same_node_passwds = 'allSameCheckBox' in request.form
 	except:
-		errors.append('A cluster must contain at least one node')
+		same_node_passwds = False
 
-	cluster_os = None
-	try:
-		cluster_os = nodeList[0]['os']
-		if not cluster_os:
-			raise KeyError('OS for ' + nodeList[0]['host'] + ' is blank')
-	except KeyError, e:
-		cluster_properties['isComplete'] = False
-		errors.append('Unable to identify the operating system running on the first cluster node: ' + str(e))
+	add_cluster = { 'name': clusterName,
+					'shared_storage': shared_storage,
+					'download_pkgs': download_pkgs,
+					'cluster_os': cluster_os,
+					'identical_passwds': same_node_passwds,
+					'check_certs': check_certs }
+
+	system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
+	add_cluster['nodes'] = system_list
+	
+	for i in system_list:
+		cur_system = system_list[i]
+
+		cur_host_trusted = 'trusted' in cur_system
+		cur_host = cur_system['host']
 
-	if cluster_properties['isComplete'] != True:
-		nodeUnauth(nodeList)
-		return (False, {'errors': errors, 'requestResults':cluster_properties })
-	else:
 		try:
-			if len(filter(lambda x: x['os'] != cluster_os, nodeList[1:])) > 0:
-				raise Exception('different operating systems were detected.')
+			cur_passwd = cur_system['passwd']
 		except:
-			cluster_properties['isComplete'] = False
-			errors.append('Cluster nodes must be running compatible operating systems.')
+			cur_passwd = None
 
-	if cluster_properties['isComplete'] == True:
-		batchNode = createClusterBatch(cluster_os,
-						clusterName,
-						clusterName,
-						map(lambda x: x['host'], nodeList),
-						True,
-						True,
-						enable_storage,
-						False,
-						rhn_dl)
-
-		if not batchNode:
-			nodeUnauth(nodeList)
-			cluster_properties['isComplete'] = False
-			errors.append('Unable to generate cluster creation ricci command')
-			return (False, {'errors': errors, 'requestResults':cluster_properties })
-
-		error = manageCluster(self, clusterName, nodeList)
-		if error:
-			nodeUnauth(nodeList)
-			cluster_properties['isComplete'] = False
-			errors.append(error)
-			return (False, {'errors': errors, 'requestResults':cluster_properties })
-
-		batch_id_map = {}
-		rc = None
-		for i in nodeList:
-			success = True
+		if (cur_host_trusted or not check_certs) and cur_passwd:
 			try:
-				rc = RicciCommunicator(i['host'])
-			except RicciError, e:
-				luci_log.debug('Unable to connect to the ricci agent on %s: %s'\
-					% (i['host'], str(e)))
-				success = False
+				rc = RicciCommunicator(cur_host, enforce_trust=True)
+				if not rc:
+					raise Exception, 'connection failed'
+			except Exception, e:
+				cur_system['errors'] = True
+				incomplete = True
+				errors.append('Unable to connect to %s: %s' \
+					% (cur_host, str(e)))
+				luci_log.debug_verbose('PCN1: %s: %s' % (cur_host, str(e)))
+				continue
+
+			prev_auth = rc.authed()
+			cur_system['prev_auth'] = prev_auth
+
+			try:
+				if prev_auth:
+					messages.append('Host %s is already authenticated.' \
+						% cur_host) 
+				else:
+					rc.auth(cur_passwd)
+
+				if not rc.authed():
+					raise Exception, 'authentication failed'
 			except:
-				success = False
+				cur_system['errors'] = True
+				incomplete = True
+				errors.append('Error authenticating to %s: %s' \
+					% (cur_host, str(e)))
+				luci_log.debug_verbose('PCN2: %s: %s' % (cur_host, str(e)))
+				continue
+
+			cur_cluster_info = rc.cluster_info()
+			if cur_cluster_info[0] or cur_cluster_info[1]:
+				cur_system['errors'] = True
+				incomplete = True
+
+				if cur_cluster_info[0]:
+					cur_cluster_name = cur_cluster_info[0]
+				elif cur_cluster_info[1]:
+					cur_cluster_name = cur_cluster_info[1]
 
-			if success == True:
 				try:
-					resultNode = rc.process_batch(batchNode, async=True)
-					batch_id_map[i['host']] = resultNode.getAttribute('batch_id')
-				except:
-					success = False
+					if not cur_system['prev_auth']:
+						rc.unauth()
+						del cur_system['trusted']
+				except Exception, e:
+					luci_log.debug_verbose('PCN3: %s: %s' % (cur_host, str(e)))
+
+				errors.append('%s reports it is a member of cluster \"%s\"' \
+					% (cur_host, cur_cluster_name))
+				luci_log.debug_verbose('PCN4: %s: already in %s cluster' \
+					% (cur_host, cur_cluster_name))
+				continue
 
-			if not success:
-				nodeUnauth(nodeList)
-				cluster_properties['isComplete'] = False
-				errors.append('An error occurred while attempting to add cluster node \"' + i['host'] + '\"')
-				return (False, {'errors': errors, 'requestResults':cluster_properties })
-		buildClusterCreateFlags(self, batch_id_map, clusterName)
+			cur_host_os = resolveOSType(rc.os())
+			if cluster_os is None:
+				cluster_os = cur_host_os
+				add_cluster['cluster_os'] = cur_host_os
+
+			elif cluster_os != cur_host_os:
+				cur_system['errors'] = True
+				incomplete = True
 
-	response = request.RESPONSE
-	response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
+				try:
+					if not cur_system['prev_auth']:
+						rc.unauth()
+						del cur_system['trusted']
+				except Exception, e:
+					luci_log.debug_verbose('PCN5: %s: %s' % (cur_host, str(e)))
+
+				errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
+				luci_log.debug_verbose('PCN6: version mismatch for %s: (%s vs. %s)' \
+					% (cur_host, cur_host_os, cluster_os))
+				continue
 
-def buildClusterCreateFlags(self, batch_map, clusterName):
-	path = str(CLUSTER_FOLDER_PATH + clusterName)
+	return add_cluster, incomplete, errors, messages
 
+def validateCreateCluster(self, request):
 	try:
-		clusterfolder = self.restrictedTraverse(path)
-	except Exception, e:
-		luci_log.debug_verbose('buildCCF0: no cluster folder at %s' % path)
-		return None
+		request.SESSION.delete('create_cluster')
+	except:
+		pass
 
-	for key in batch_map.keys():
-		try:
-			key = str(key)
-			batch_id = str(batch_map[key])
-			#This suffix needed to avoid name collision
-			objname = str(key + "____flag")
+	cluster_os = None
+	try:
+		cluster_os = request.form['cluster_os'].strip()
+		if not cluster_os:
+			raise Exception, 'cluster OS is blank'
+	except:
+		cluster_os = None
 
-			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-			#now designate this new object properly
-			objpath = str(path + "/" + objname)
-			flag = self.restrictedTraverse(objpath)
+	add_cluster, incomplete, errors, messages = parseClusterNodes(self, request, cluster_os)
+	clusterName = add_cluster['name']
 
-			flag.manage_addProperty(BATCH_ID, batch_id, "string")
-			flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
-			flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
-			flag.manage_addProperty(LAST_STATUS, 0, "int")
+	if incomplete or len(errors) > 0:
+		request.SESSION.set('create_cluster', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	node_list = add_cluster['nodes'].keys()
+	batchNode = createClusterBatch(add_cluster['cluster_os'],
+					clusterName,
+					clusterName,
+					node_list,
+					True,
+					True,
+					add_cluster['shared_storage'],
+					False,
+					add_cluster['download_pkgs'])
+
+	if not batchNode:
+		request.SESSION.set('create_cluster', add_cluster)
+		errors.append('Unable to generate cluster creation ricci command')
+		return (False, { 'errors': errors, 'messages': messages })
+
+	error = manageCluster(self, clusterName, add_cluster['nodes'], add_cluster['cluster_os'])
+	if error:
+		errors.append('Unable to create the cluster Luci database objects')
+		request.SESSION.set('create_cluster', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	batch_id_map = {}
+	for i in node_list:
+		try:
+			rc = RicciCommunicator(i)
+			if not rc:
+				raise Exception, 'rc is None'
 		except Exception, e:
-			luci_log.debug_verbose('buildCCF1: error creating flag for %s: %s' \
-				% (key, str(e)))
+			msg = 'Unable to connect to the ricci agent on %s: %s' % (i, str(e))
+			errors.append(msg)
+			luci_log.debug_verbose(msg)
+			if len(batch_id_map) == 0:
+				request.SESSION.set('create_cluster', add_cluster)
+				return (False, { 'errors': errors, 'messages': messages })
+			continue
 
-def validateAddClusterNode(self, request):
-	requestResults = {}
-	errors = list()
+		try:
+			resultNode = rc.process_batch(batchNode, async=True)
+			batch_id_map[i] = resultNode.getAttribute('batch_id')
+		except:
+			errors.append('An error occurred while attempting to add cluster node \"%s\"' % i)
+			if len(batch_id_map) == 0:
+				request.SESSION.set('create_cluster', add_cluster)
+				return (False, { 'errors': errors, 'messages': messages })
+			continue
 
+	buildClusterCreateFlags(self, batch_id_map, clusterName)
+	response = request.RESPONSE
+	response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
+
+def validateAddClusterNode(self, request):
 	try:
-		sessionData = request.SESSION.get('checkRet')
+		request.SESSION.delete('add_node')
 	except:
-		sessionData = None
+		pass
 
-	if 'clusterName' in request.form:
-		clusterName = str(request.form['clusterName'])
-	else:
-		luci_log.debug_verbose('vACN00: no cluster name was given')
-		return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
+	check_certs = False
+	try:
+		check_certs = 'check_certs' in request.form
+	except:
+		check_certs = False
 
-	rhn_dl = 1
+	download_pkgs = 1
 	try:
-		rhn_dls = request.form['rhn_dl'].strip().lower()
-		if rhn_dls != '1' and rhn_dls != 'true':
-			rhn_dl = 0
+		download_pkgs = int(request.form['download_pkgs'].strip())
 	except:
-		rhn_dl = 0
+		download_pkgs = 1
 
-	enable_storage = 0
+	cluster_os = None
 	try:
-		enable_storages = request.form['enable_storage'].strip().lower()
-		if enable_storages:
-			enable_storage = 1
+		cluster_os = request.form['cluster_os'].strip()
+		if not cluster_os:
+			raise Exception, 'cluster OS is blank'
 	except:
-		enable_storage = 0
+		cluster_os = None
 
+	clusterName = None
 	try:
-		numStorage = int(request.form['numStorage'])
-		if numStorage < 1:
-			raise Exception, 'no nodes were added'
-	except Exception, e:
-		luci_log.debug_verbose('vACN0: %s: %s' % (clusterName, str(e)))
-		errors.append('You must specify at least one node to add to the cluster')
-		return (False, {'errors': [ errors ], 'requestResults': requestResults })
+		clusterName = str(request.form['clusterName'])
+	except:
+		clusterName = None
+
+	if clusterName is None:
+		luci_log.debug_verbose('VACN0: no cluster name was given')
+		return (False, { 'errors': [ 'No cluster name was given.' ]})
+
+	if cluster_os is None:
+		cluster_folder = None
+		try:
+			cluster_folder = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH + clusterName))
+			if not cluster_folder:
+				raise Exception, 'cluster DB object is missing'
+		except Exception, e:
+			luci_log.debug_verbose('VACN1: %s: %s' % (clusterName, str(e)))
+			return (False, { 'errors': [ 'The database object for %s is missing.' % clusterName ] })
+		
+		try:
+			cluster_os = cluster_folder.manage_getProperty('cluster_os')
+			if not cluster_os:
+				raise Exception, 'cluster os is blank'
+		except Exception, e:
+			luci_log.debug_verbose('VACN2: %s: %s' % (clusterName, str(e)))
+			cluster_os = None
 
-	ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
-	errors.extend(ret[0])
-	cluster_properties = ret[1]
+		if cluster_os is None:
+			try:
+				cluster_ricci = getRicciAgent(self, clusterName)
+				cluster_os = resolveOSType(cluster_ricci.os())
+			except Exception, e:
+				luci_log.debug_verbose('VACN3: %s: %s' % (clusterName, str(e)))
+				cluster_os = None
+
+	if cluster_os is None:
+		luci_log.debug_verbose('Unable to determine cluster OS for %s' % clusterName)
+		return (False, { 'errors': [ 'Unable to determine the version of the cluster suite this cluster is running.' ] })
 
+	shared_storage = False
 	try:
-		nodeList = cluster_properties['nodeList']
-		if len(nodeList) < 1:
-			raise Exception, 'no cluster nodes'
-	except Exception, e:
-		luci_log.debug_verbose('vACN1: %s: %s' % (clusterName, str(e)))
-		errors.append('You must specify at least one valid node to add to the cluster')
+		shared_storage = request.form.has_key('enable_storage')
+	except:
+		shared_storage = False
 
-	clusterObj = None
+	same_node_passwds = False
 	try:
-		clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
-		cluster_os = clusterObj.manage_getProperty('cluster_os')
-		if not cluster_os:
-			raise Exception, 'no cluster OS was found in DB for %s' % clusterName
-	except Exception, e:
-		luci_log.debug_verbose('vACN2: %s: %s' % (clusterName, str(e)))
+		same_node_passwds = 'allSameCheckBox' in request.form
+	except:
+		same_node_passwds = False
+
+	add_cluster = { 'name': clusterName,
+					'shared_storage': shared_storage,
+					'download_pkgs': download_pkgs,
+					'cluster_os': cluster_os,
+					'identical_passwds': same_node_passwds,
+					'check_certs': check_certs }
+
+	system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
+	add_cluster['nodes'] = system_list
+	
+	for i in system_list:
+		cur_system = system_list[i]
+
+		cur_host_trusted = 'trusted' in cur_system
+		cur_host = cur_system['host']
+
 		try:
-			cluster_ricci = getRicciAgent(self, clusterName)
-			if not cluster_ricci:
-				raise Exception, 'cannot find a ricci agent for %s' % clusterName
-			cluster_os = getClusterOS(self, cluster_ricci)['os']
-			if clusterObj is None:
-				try:
-					clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
-				except:
-					pass
+			cur_passwd = cur_system['passwd']
+		except:
+			cur_passwd = None
+
+		if (cur_host_trusted or not check_certs) and cur_passwd:
+			try:
+				rc = RicciCommunicator(cur_host, enforce_trust=True)
+				if not rc:
+					raise Exception, 'connection failed'
+			except Exception, e:
+				cur_system['errors'] = True
+				incomplete = True
+				errors.append('Unable to connect to %s: %s' \
+					% (cur_host, str(e)))
+				luci_log.debug_verbose('VACN4: %s: %s' % (cur_host, str(e)))
+				continue
+
+			prev_auth = rc.authed()
+			cur_system['prev_auth'] = prev_auth
+			try:
+				if prev_auth:
+					messages.append('Host %s is already authenticated.' \
+						% cur_host) 
+				else:
+					rc.auth(cur_passwd)
+
+				if not rc.authed():
+					raise Exception, 'authentication failed'
+			except Exception, e:
+				cur_system['errors'] = True
+				incomplete = True
+				errors.append('Error authenticating to %s: %s' \
+					% (cur_host, str(e)))
+				luci_log.debug_verbose('VACN5: %s: %s' % (cur_host, str(e)))
+				continue
+
+			cur_cluster_info = rc.cluster_info()
+			if cur_cluster_info[0] or cur_cluster_info[1]:
+				cur_system['errors'] = True
+				incomplete = True
+
+				if cur_cluster_info[0]:
+					cur_cluster_name = cur_cluster_info[0]
+				elif cur_cluster_info[1]:
+					cur_cluster_name = cur_cluster_info[1]
 
 				try:
-					clusterObj.manage_addProperty('cluster_os', cluster_os, 'string')
-				except:
-					pass
-		except Exception, e:
-			luci_log.debug_verbose('vACN3: %s: %s' % (clusterName, str(e)))
-			nodeUnauth(nodeList)
-			cluster_os = None
-			cluster_properties['isComplete'] = False
-			errors.append('Unable to determine the cluster OS for the ' + clusterName + ' cluster.')
+					if not cur_system['prev_auth']:
+						rc.unauth()
+						del cur_system['trusted']
+				except Exception, e:
+					luci_log.debug_verbose('VACN6: %s: %s' % (cur_host, str(e)))
+
+				errors.append('%s reports it is already a member of cluster \"%s\"' % (cur_host, cur_cluster_name))
+				luci_log.debug_verbose('VACN7: %s: already in %s cluster' \
+					% (cur_host, cur_cluster_name))
+				continue
 
-	try:
-		if cluster_os is None:
-			raise Exception, 'no cluster OS found for %s' % clusterName
-		if len(filter(lambda x: x['os'] != cluster_os, nodeList)) > 0:
-			raise Exception, 'different operating systems were detected.'
-	except Exception, e:
-		luci_log.debug_verbose('vACN4: %s: %s' % (clusterName, str(e)))
-		nodeUnauth(nodeList)
-		cluster_properties['isComplete'] = False
-		errors.append('Cluster nodes must be running compatible operating systems.')
-
-	if not cluster_properties['isComplete']:
-		return (False, {'errors': errors, 'requestResults': cluster_properties})
-
-	for clunode in nodeList:
-		try:
-			batchNode = addClusterNodeBatch(clunode['os'],
-							clusterName,
-							True,
-							True,
-							enable_storage,
-							False,
-							rhn_dl)
-			if not batchNode:
-				raise Exception, 'batchnode is None'
-			clunode['batchnode'] = batchNode
-		except Exception, e:
-			luci_log.debug_verbose('vACN5: node add for %s failed: %s' \
-				% (clunode['host'], str(e)))
-			clunode['errors'] = True
-			nodeUnauth(nodeList)
-			cluster_properties['isComplete'] = False
-			errors.append('Unable to initiate node creation for host \"' + clunode['host'] + '\"')
+			cur_host_os = resolveOSType(rc.os())
+			if cluster_os is not None and cluster_os != cur_host_os:
+				cur_system['errors'] = True
+				incomplete = True
+
+				try:
+					if not cur_system['prev_auth']:
+						rc.unauth()
+						del cur_system['trusted']
+				except Exception, e:
+					luci_log.debug_verbose('VACN8: %s: %s' % (cur_host, str(e)))
+
+				errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
+				luci_log.debug_verbose('VACN9: version mismatch for %s: (%s vs. %s)' \
+					% (cur_host, cur_host_os, cluster_os))
+				continue
 
-	if not cluster_properties['isComplete']:
-		return (False, {'errors': errors, 'requestResults': cluster_properties})
+	if incomplete or len(errors) > 0:
+		request.SESSION.set('add_node', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
 
 	try:
 		cluster_ricci = getRicciAgent(self, clusterName)
 		if not cluster_ricci:
-			raise Exception, 'Unable to get a ricci agent for %s' % clusterName
+			raise Exception, 'Unable to find a ricci agent for %s' % clusterName
 	except Exception, e:
-		cluster_properties['isComplete'] = False
-		nodeUnauth(nodeList)
-		errors.append('Unable to contact a Ricci agent for %s.' % clusterName)
-		luci_log.debug_verbose('vACN6: ricci %s: %s' % (clusterName, str(e)))
-		return (False, {'errors': errors, 'requestResults': cluster_properties})
+		incomplete = True
+		errors.append('Unable to contact a ricci agent for %s.' % clusterName)
+		luci_log.debug_verbose('VACN10: %s: %s' % (clusterName, str(e)))
+
+	if incomplete or len(errors) > 0:
+		request.SESSION.set('add_node', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
 
 	try:
 		model = getModelBuilder(None, cluster_ricci, cluster_ricci.dom0())
 		if not model:
+			errors.append('Unable to build the cluster model for %s' \
+				% clusterName)
 			raise Exception, 'unable to get model for %s' % clusterName
+
 		nodesptr = model.getClusterNodesPtr()
 		used_ids = {}
 		for i in model.getNodes():
+			used_ids[int(i.getAttribute('nodeid'))] = 1
+			node_name = str(i.getAttribute('name'))
+			if node_name in system_list:
+				system_list[node_name]['errors'] = True
+				errors.append('%s is already a member of %s' \
+					% (node_name, clusterName))
+	except Exception, e:
+		incomplete = True
+		errors.append('Unable to build the cluster model for %s' \
+			% clusterName)
+		luci_log.debug_verbose('VACN11: %s' % str(e))
+
+	if incomplete or len(errors) > 0:
+		request.SESSION.set('add_node', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	next_node_id = 1
+
+	try:
+		for x in system_list:
+			i = system_list[x]
+
 			try:
-				used_ids[int(i.getAttribute('nodeid'))] = 1
+				batch_node = addClusterNodeBatch(cluster_os,
+								clusterName,
+								True,
+								True,
+								shared_storage,
+								False,
+								download_pkgs)
+				if not batch_node:
+					raise Exception, 'batch is blank'
+				system_list[x]['batch'] = batch_node
 			except Exception, e:
-				luci_log.debug_verbose('vACN7: %s' % str(e))
-				pass
-		next_node_id = 1
-		for i in nodeList:
+				i['errors'] = True
+				incomplete = True
+
+				try:
+					if not i['prev_auth']:
+						# rc from the earlier validation loop points at a
+						# different host; unauth this node explicitly.
+						RicciCommunicator(i['host']).unauth()
+						del i['trusted']
+				except Exception, e:
+					luci_log.debug_verbose('VACN12: %s: %s' % (i['host'], str(e)))
+
+				errors.append('Unable to initiate cluster join for %s' % i['host'])
+				luci_log.debug_verbose('VACN13: %s: %s' % (i['host'], str(e)))
+				continue
+
 			next_node_id += 1
 			new_node = ClusterNode()
-			new_node.attr_hash['name'] = i['host']
+			new_node.attr_hash['name'] = str(i['host'])
 			new_node.attr_hash['votes'] = str(1)
 			while next_node_id in used_ids:
 				next_node_id += 1
 			new_node.attr_hash['nodeid'] = str(next_node_id)
 			nodesptr.addChild(new_node)
 
-		model.isModified = True
+		if incomplete or len(errors) > 0:
+			request.SESSION.set('add_node', add_cluster)
+			return (False, { 'errors': errors, 'messages': messages })
+
+		cp = model.getClusterPtr()
+		cp.incrementConfigVersion()
+		model.setModified(True)
 		conf_str = str(model.exportModelAsString())
 		if not conf_str:
-			raise Exception, 'unable to export model as a string'
-		batch_number, result = setClusterConf(cluster_ricci, conf_str)
+			raise Exception, 'Unable to save the new cluster model.'
 
+		batch_number, result = setClusterConf(cluster_ricci, conf_str)
+		if not batch_number or not result:
+			raise Exception, 'batch or result is None'
+	except Exception, e:
+		incomplete = True
+		errors.append('Unable to save the new cluster model.')
+		luci_log.debug_verbose('VACN14: %s' % str(e))
+
+	# Propagate the new cluster.conf to the existing nodes
+	# before having any of the new nodes join. If this fails,
+	# abort the whole process.
+	try:
 		while True:
 			batch_ret = checkBatch(cluster_ricci, batch_number)
 			code = batch_ret[0]
@@ -438,47 +565,61 @@
 			if code == False:
 				time.sleep(0.5)
 	except Exception, e:
-		luci_log.debug_verbose('vACN8: %s' % str(e))
-		errors.append('Unable to update the cluster node list for %s' % clusterName)
-		return (False, {'errors': errors, 'requestResults': cluster_properties})
+		incomplete = True
+		errors.append('Unable to update the cluster node list for %s' \
+			% clusterName)
+		luci_log.debug_verbose('VACN15: %s' % str(e))
+
+	if incomplete or len(errors) > 0:
+		request.SESSION.set('add_node', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
 
-	error = createClusterSystems(self, clusterName, nodeList)
+	error = createClusterSystems(self, clusterName, system_list)
 	if error:
-		luci_log.debug_verbose('vACN9: %s: %s' % (clusterName, str(e)))
-		nodeUnauth(nodeList)
-		cluster_properties['isComplete'] = False
+		incomplete = True
 		errors.append(error)
-		return (False, {'errors': errors, 'requestResults': cluster_properties})
+		luci_log.debug_verbose('VACN16: %s: %s' % (clusterName, error))
+
+	if incomplete or len(errors) > 0:
+		request.SESSION.set('add_node', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
 
 	batch_id_map = {}
-	for clunode in nodeList:
+	for x in system_list:
+		clunode = system_list[x]
 		success = True
+
+		cur_host = clunode['host']
 		try:
-			rc = RicciCommunicator(clunode['host'])
+			rc = RicciCommunicator(cur_host)
 			if not rc:
 				raise Exception, 'rc is None'
 		except Exception, e:
-			nodeUnauth([clunode['host']])
 			success = False
-			luci_log.info('vACN10: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
+			clunode['errors'] = True
+			errors.append('Unable to connect to the ricci agent on %s: %s' \
+				% (cur_host, str(e)))
+			luci_log.info('VACN17: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
 
 		if success:
 			try:
-				resultNode = rc.process_batch(clunode['batchnode'], async=True)
-				batch_id_map[clunode['host']] = resultNode.getAttribute('batch_id')
+				resultNode = rc.process_batch(clunode['batch'], async=True)
+				batch_id_map[cur_host] = resultNode.getAttribute('batch_id')
 			except Exception, e:
-				nodeUnauth([clunode['host']])
+				clunode['errors'] = True
 				success = False
-				luci_log.info('vACN11: %s' % (clunode['host'], str(e)))
+				luci_log.debug_verbose('VACN18: %s: %s' \
+					% (cur_host, str(e)))
 
 		if not success:
-			cluster_properties['isComplete'] = False
-			errors.append('An error occurred while attempting to add cluster node \"' + clunode['host'] + '\"')
+			incomplete = True
+			errors.append('An error occurred while attempting to add cluster node \"%s\"' % cur_host)
 
-	buildClusterCreateFlags(self, batch_id_map, clusterName)
+	if incomplete or len(errors) > 0:
+		request.SESSION.set('add_node', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
 
-	if len(errors) > 0:
-		return (False, {'errors': errors, 'requestResults': cluster_properties})
+	buildClusterCreateFlags(self, batch_id_map, clusterName)
 
 	response = request.RESPONSE
 	response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
@@ -642,6 +783,9 @@
 		return (False, {'errors': [ 'Unable to determine cluster name' ]})
 
 	try:
+		cp = model.getClusterPtr()
+		cp.incrementConfigVersion()
+		model.setModified(True)
 		conf = model.exportModelAsString()
 		if not conf:
 			raise Exception, 'model string for %s is blank' % clustername
@@ -696,9 +840,9 @@
 			if res and res[2]:
 				errors.extend(res[2])
 			raise Exception, 'An error occurred while adding this resource'
-		modelb = res[1]
+		model = res[1]
 		newres = res[0]
-		addResource(self, request, modelb, newres, res_type)
+		addResource(self, request, model, newres, res_type)
 	except Exception, e:
 		if len(errors) < 1:
 			errors.append('An error occurred while adding this resource')
@@ -747,7 +891,6 @@
 	try:
 		model.usesMulticast = True
 		model.mcast_address = addr_str
-		model.isModified = True
 	except Exception, e:
 		luci_log.debug('Error updating mcast properties: %s' % str(e))
 		errors.append('Unable to update cluster multicast properties')
@@ -1064,7 +1207,7 @@
   try:
     clustername = model.getClusterName()
     if not clustername:
-      raise Exception, 'cluster name from modelb.getClusterName() is blank'
+      raise Exception, 'cluster name from model.getClusterName() is blank'
   except Exception, e:
     luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
     errors.append('Unable to determine cluster name from model') 
@@ -1143,7 +1286,7 @@
     return (False, {'errors': ['No form was submitted']})
 
   #fencehandler = FenceHandler()
-  error_code,error_string = validateNewFenceDevice(form, model)
+  error_code, error_string = validateNewFenceDevice(form, model)
   if error_code == FD_VAL_SUCCESS:
     messages.append(error_string)
     try:
@@ -1161,7 +1304,7 @@
     try:
       clustername = model.getClusterName()
       if not clustername:
-        raise Exception, 'cluster name from modelb.getClusterName() is blank'
+        raise Exception, 'cluster name from model.getClusterName() is blank'
     except Exception, e:
       luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
       errors.append('Unable to determine cluster name from model') 
@@ -1240,7 +1383,7 @@
   #entry for this fence device.
   #
   #pass form and model to validation method, then save changes if it passes.
-  error_code,error_string = validateFenceDevice(form, model)
+  error_code, error_string = validateFenceDevice(form, model)
   if error_code == FD_VAL_SUCCESS:
     messages.append(error_string)
     try:
@@ -1258,7 +1401,7 @@
     try:
       clustername = model.getClusterName()
       if not clustername:
-        raise Exception, 'cluster name from modelb.getClusterName() is blank'
+        raise Exception, 'cluster name from model.getClusterName() is blank'
     except Exception, e:
       luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
       errors.append('Unable to determine cluster name from model') 
@@ -1363,7 +1506,7 @@
     error_string = "Fence device %s could not be removed from configuration" % fencedev_name
  
   try:
-    model.removeFenceInstancesForFenceDevice(orig_name)
+    model.removeFenceInstancesForFenceDevice(fencedev_name)
   except:
     luci_log.debug_verbose('VFD: Could not remove fence instances for')
      
@@ -1385,7 +1528,7 @@
     try:
       clustername = model.getClusterName()
       if not clustername:
-        raise Exception, 'cluster name from modelb.getClusterName() is blank'
+        raise Exception, 'cluster name from model.getClusterName() is blank'
     except Exception, e:
       luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
       errors.append('Unable to determine cluster name from model') 
@@ -2176,7 +2319,7 @@
 
 		return None
 
-	cluname = lower(clustername)
+	cluname = clustername.lower()
 
 	for node in nodes:
 		try:
@@ -2432,7 +2575,7 @@
 			results.append(vals)
 	return results
 
-def getServicesInfo(self, status, modelb, req):
+def getServicesInfo(self, status, model, req):
 	map = {}
 	maplist = list()
 
@@ -2466,7 +2609,7 @@
 			itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE
 			itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE_DELETE
 
-			svc = modelb.retrieveServiceByName(item['name'])
+			svc = model.retrieveServiceByName(item['name'])
 			dom = svc.getAttribute("domain")
 			if dom is not None:
 				itemmap['faildom'] = dom
@@ -2477,7 +2620,7 @@
 	map['services'] = maplist
 	return map
 
-def getServiceInfo(self, status, modelb, req):
+def getServiceInfo(self, status, model, req):
 	#set up struct for service config page
 	hmap = {}
 	root_uuid = 'toplevel'
@@ -2527,7 +2670,7 @@
 					innermap['restarturl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_RESTART
 					innermap['delurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_DELETE
 
-					nodes = modelb.getNodes()
+					nodes = model.getNodes()
 					for node in nodes:
 						starturl = {}
 						if node.getName() != nodename:
@@ -2541,7 +2684,7 @@
 					innermap = {}
 					innermap['current'] = "This service is currently stopped"
 					innermap['enableurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START
-					nodes = modelb.getNodes()
+					nodes = model.getNodes()
 					starturls = list()
 					for node in nodes:
 						starturl = {}
@@ -2553,7 +2696,7 @@
 
 	#Now build hashes for resources under service.
 	#first get service by name from model
-	svc = modelb.getService(servicename)
+	svc = model.getService(servicename)
 	resource_list = list()
 	if svc is not None:
 		indent_ctr = 0
@@ -2730,7 +2873,7 @@
 	response = req.RESPONSE
 	response.redirect(req['URL'] + "?pagetype=" + SERVICE_LIST + "&clustername=" + cluname + '&busyfirst=true')
 
-def getFdomsInfo(self, modelb, request, clustatus):
+def getFdomsInfo(self, model, request, clustatus):
   slist = list()
   nlist = list()
   for item in clustatus:
@@ -2741,8 +2884,8 @@
   fdomlist = list()
   clustername = request['clustername']
   baseurl = request['URL']
-  fdoms = modelb.getFailoverDomains()
-  svcs = modelb.getServices()
+  fdoms = model.getFailoverDomains()
+  svcs = model.getServices()
   for fdom in fdoms:
     fdom_map = {}
     fdom_map['name'] = fdom.getName()
@@ -3366,10 +3509,10 @@
 	else:
 		delete_target = None
 		nodelist = model.getNodes()
-		find_node = lower(nodename)
+		find_node = nodename.lower()
 		for n in nodelist:
 			try:
-				if lower(n.getName()) == find_node:
+				if n.getName().lower() == find_node:
 					delete_target = n
 					break
 			except:
@@ -3387,6 +3530,9 @@
 				% (delete_target.getName(), str(e)))
 
 		try:
+			cp = model.getClusterPtr()
+			cp.incrementConfigVersion()
+			model.setModified(True)
 			str_buf = model.exportModelAsString()
 			if not str_buf:
 				raise Exception, 'model string is blank'
@@ -3471,8 +3617,8 @@
 				% (nodename_resolved, clustername))
 			return (False, {'errors': [ 'Node %s reports it is not in a cluster.' % nodename_resolved ]})
 
-		cname = lower(clustername)
-		if cname != lower(cluinfo[0]) and cname != lower(cluinfo[1]):
+		cname = clustername.lower()
+		if cname != cluinfo[0].lower() and cname != cluinfo[1].lower():
 			luci_log.debug('NTP6: node %s in unknown cluster %s:%s (expected %s)' % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
 			return (False, {'errors': [ 'Node %s reports it in cluster \"%s\". We expect it to be a member of cluster \"%s\"' % (nodename_resolved, cluinfo[0], clustername) ]})
 
@@ -4188,6 +4334,9 @@
     xvm.addAttribute("path", req.form['xenvmpath'])
 
   try:
+    cp = model.getClusterPtr()
+    cp.incrementConfigVersion()
+    model.setModified(True)
     stringbuf = model.exportModelAsString()
     if not stringbuf:
       raise Exception, 'model is blank'
@@ -4198,7 +4347,7 @@
   try:
     clustername = model.getClusterName()
     if not clustername:
-      raise Exception, 'cluster name from modelb.getClusterName() is blank'
+      raise Exception, 'cluster name from model.getClusterName() is blank'
   except Exception, e:
     luci_log.debug_verbose('error: getClusterName: %s' % str(e))
     return None
@@ -4538,7 +4687,7 @@
 		map['isVirtualized'] = False
 	return map
 
-def getResourcesInfo(modelb, request):
+def getResourcesInfo(model, request):
 	resList = list()
 	baseurl = request['URL']
 
@@ -4551,7 +4700,7 @@
 			luci_log.debug_verbose('getResourcesInfo missing cluster name')
 			return resList
 
-	for item in modelb.getResources():
+	for item in model.getResources():
 		itemmap = {}
 		itemmap['name'] = item.getName()
 		itemmap['attrs'] = item.attr_hash
@@ -4562,9 +4711,9 @@
 		resList.append(itemmap)
 	return resList
 
-def getResourceInfo(modelb, request):
-	if not modelb:
-		luci_log.debug_verbose('GRI0: no modelb object in session')
+def getResourceInfo(model, request):
+	if not model:
+		luci_log.debug_verbose('GRI0: no model object in session')
 		return {}
 
 	name = None
@@ -4603,7 +4752,7 @@
 		luci_log.debug_verbose('getResourceInfo missing URL')
 		return {}
 
-	for res in modelb.getResources():
+	for res in model.getResources():
 		if res.getName() == name:
 			try:
 				resMap = {}
@@ -4620,7 +4769,7 @@
 	errstr = 'An error occurred while attempting to set the new cluster.conf'
 
 	try:
-		modelb = request.SESSION.get('model')
+		model = request.SESSION.get('model')
 	except Exception, e:
 		luci_log.debug_verbose('delService0: no model: %s' % str(e))
 		return (False, {'errors': [ errstr ] })
@@ -4665,13 +4814,16 @@
 		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster.' % errstr ]})
 
 	try:
-		modelb.deleteService(name)
+		model.deleteService(name)
 	except Exception, e:
 		luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s' % (name, clustername))
 		return (False, {'errors': [ '%s: error removing service %s.' % (errstr, name) ]})
 
 	try:
-		conf = modelb.exportModelAsString()
+		cp = model.getClusterPtr()
+		cp.incrementConfigVersion()
+		model.setModified(True)
+		conf = model.exportModelAsString()
 		if not conf:
 			raise Exception, 'model string is blank'
 	except Exception, e:
@@ -4696,7 +4848,7 @@
 	errstr = 'An error occurred while attempting to set the new cluster.conf'
 
 	try:
-		modelb = request.SESSION.get('model')
+		model = request.SESSION.get('model')
 	except Exception, e:
 		luci_log.debug_verbose('delResource0: no model: %s' % str(e))
 		return errstr
@@ -4735,7 +4887,7 @@
 		luci_log.debug_verbose('delResource3: %s: %s' % (errstr, str(e)))
 		return errstr + ': could not determine the ricci agent hostname'
 
-	resPtr = modelb.getResourcesPtr()
+	resPtr = model.getResourcesPtr()
 	resources = resPtr.getChildren()
 
 	found = 0
@@ -4750,7 +4902,10 @@
 		return errstr + ': the specified resource was not found.'
 
 	try:
-		conf = modelb.exportModelAsString()
+		cp = model.getClusterPtr()
+		cp.incrementConfigVersion()
+		model.setModified(True)
+		conf = model.exportModelAsString()
 		if not conf:
 			raise Exception, 'model string is blank'
 	except Exception, e:
@@ -4779,9 +4934,9 @@
 		luci_log.debug_verbose('addIp error: form is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addIp error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addIp error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -4789,7 +4944,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 		except Exception, e:
 			luci_log.debug_verbose('addIp error: %s' % str(e))
 			return None
@@ -4825,7 +4980,7 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 def addFs(request, form=None):
 	if form is None:
@@ -4835,9 +4990,9 @@
 		luci_log.debug_verbose('addFs error: form is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addFs error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addFs error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -4845,7 +5000,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 		except Exception, e:
 			luci_log.debug_verbose('addFs error: %s' % str(e))
 			return None
@@ -4929,7 +5084,7 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 def addGfs(request, form=None):
 	if form is None:
@@ -4939,9 +5094,9 @@
 		luci_log.debug_verbose('addGfs error: form is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addGfs error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addGfs error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -4949,7 +5104,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 			if not res:
 				luci_log.debug('resource %s was not found for editing' % oldname)
 				return None
@@ -5020,7 +5175,7 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 def addNfsm(request, form=None):
 	if form is None:
@@ -5030,9 +5185,9 @@
 		luci_log.debug_verbose('addNfsm error: form is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addNfsm error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addNfsm error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -5040,7 +5195,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 		except Exception, e:
 			luci_log.debug_verbose('addNfsm error: %s' % str(e))
 			return None
@@ -5115,7 +5270,7 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 def addNfsc(request, form=None):
 	if form is None:
@@ -5125,9 +5280,9 @@
 		luci_log.debug_verbose('addNfsc error: form is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addNfsc error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addNfsc error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -5135,7 +5290,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 		except Exception, e:
 			luci_log.debug_verbose('addNfsc error: %s' % str(e))
 			return None
@@ -5179,19 +5334,19 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 def addNfsx(request, form=None):
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addNfsx error: modelb is missing')
+		luci_log.debug_verbose('addNfsx error: model is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addNfsx error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addNfsx error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -5199,7 +5354,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 		except Exception, e:
 			luci_log.debug_verbose('addNfsx error: %s', str(e))
 			return None
@@ -5227,7 +5382,7 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 def addScr(request, form=None):
 	if form is None:
@@ -5237,9 +5392,9 @@
 		luci_log.debug_verbose('addScr error: form is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addScr error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addScr error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -5247,7 +5402,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 		except Exception, e:
 			luci_log.debug_verbose('addScr error: %s' % str(e))
 			return None
@@ -5285,7 +5440,7 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 def addSmb(request, form=None):
 	if form is None:
@@ -5295,9 +5450,9 @@
 		luci_log.debug_verbose('addSmb error: form is missing')
 		return None
 
-	modelb = request.SESSION.get('model')
-	if not modelb:
-		luci_log.debug_verbose('addSmb error: modelb is missing')
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addSmb error: model is missing')
 		return None
 
 	if form.has_key('edit'):
@@ -5305,7 +5460,7 @@
 			oldname = form['oldname'].strip()
 			if not oldname:
 				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(modelb, oldname)
+			res = getResourceForEdit(model, oldname)
 		except Exception, e:
 			luci_log.debug_verbose('addSmb error: %s' % str(e))
 			return None
@@ -5341,7 +5496,7 @@
 
 	if len(errors) > 1:
 		return [None, None, errors]
-	return [res, modelb, None]
+	return [res, model, None]
 
 resourceAddHandler = {
 	'ip': addIp,
@@ -5354,9 +5509,9 @@
 	'smb': addSmb
 }
 
-def resolveClusterChanges(self, clusterName, modelb):
+def resolveClusterChanges(self, clusterName, model):
 	try:
-		mb_nodes = modelb.getNodes()
+		mb_nodes = model.getNodes()
 		if not mb_nodes or not len(mb_nodes):
 			raise Exception, 'node list is empty'
 	except Exception, e:
@@ -5431,8 +5586,8 @@
 	
 	return messages
 
-def addResource(self, request, modelb, res, res_type):
-	clustername = modelb.getClusterName()
+def addResource(self, request, model, res, res_type):
+	clustername = model.getClusterName()
 	if not clustername:
 		luci_log.debug_verbose('addResource0: no cluname from mb')
 		return 'Unable to determine cluster name'
@@ -5443,13 +5598,16 @@
 		return 'Unable to find a ricci agent for the %s cluster' % clustername
 
 	try:
-		modelb.getResourcesPtr().addChild(res)
+		model.getResourcesPtr().addChild(res)
 	except Exception, e:
 		luci_log.debug_verbose('addResource2: adding the new resource failed: %s' % str(e))
 		return 'Unable to add the new resource'
 
 	try:
-		conf = modelb.exportModelAsString()
+		cp = model.getClusterPtr()
+		cp.incrementConfigVersion()
+		model.setModified(True)
+		conf = model.exportModelAsString()
 		if not conf:
 			raise Exception, 'model string for %s is blank' % clustername
 	except Exception, e:
@@ -5484,8 +5642,8 @@
 	response = request.RESPONSE
 	response.redirect(request['URL'] + "?pagetype=" + RESOURCES + "&clustername=" + clustername + '&busyfirst=true')
 
-def getResource(modelb, name):
-	resPtr = modelb.getResourcesPtr()
+def getResource(model, name):
+	resPtr = model.getResourcesPtr()
 	resources = resPtr.getChildren()
 
 	for res in resources:
@@ -5495,8 +5653,8 @@
 	luci_log.debug_verbose('getResource: unable to find resource \"%s\"' % name)
 	raise KeyError, name
 
-def getResourceForEdit(modelb, name):
-	resPtr = modelb.getResourcesPtr()
+def getResourceForEdit(model, name):
+	resPtr = model.getResourcesPtr()
 	resources = resPtr.getChildren()
 
 	for res in resources:
@@ -5591,18 +5749,18 @@
 		return None
 
 	try:
-		modelb = ModelBuilder(0, None, None, cluster_conf_node)
-		if not modelb:
+		model = ModelBuilder(0, None, None, cluster_conf_node)
+		if not model:
 			raise Exception, 'ModelBuilder returned None'
 	except Exception, e:
 		try:
-			luci_log.debug_verbose('GMB1: An error occurred while trying to get modelb for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
+			luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
 		except:
 			luci_log.debug_verbose('GMB1: ModelBuilder failed')
 
-	if modelb:
-		modelb.setIsVirtualized(isVirtualized)
-	return modelb
+	if model:
+		model.setIsVirtualized(isVirtualized)
+	return model
 
 def getModelForCluster(self, clustername):
 	rc = getRicciAgent(self, clustername)
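
Several of the hunks above introduce the same save-and-propagate sequence: bump the cluster.conf version, mark the model modified, export it as a string, and hand it to a ricci agent on an existing member. A minimal sketch of that sequence, using the helper names that appear in this patch (the wrapper function propagate_conf itself is hypothetical):

	def propagate_conf(model, cluster_ricci):
		# Bump the config version so member nodes accept the new cluster.conf.
		cp = model.getClusterPtr()
		cp.incrementConfigVersion()
		model.setModified(True)

		conf_str = str(model.exportModelAsString())
		if not conf_str:
			raise Exception, 'unable to export the cluster model'

		# Push the new cluster.conf through the ricci agent; the returned
		# batch can be polled with checkBatch() until it completes or fails.
		return setClusterConf(cluster_ricci, conf_str)
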
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/12/14 21:37:15	1.41
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/12/21 05:08:49	1.42
@@ -6,7 +6,9 @@
 from conga_constants import PLONE_ROOT, CLUSTER_NODE_NEED_AUTH, \
 							HOMEBASE_ADD_CLUSTER, HOMEBASE_ADD_CLUSTER_INITIAL, \
 							HOMEBASE_ADD_SYSTEM, HOMEBASE_ADD_USER, \
-							HOMEBASE_DEL_SYSTEM, HOMEBASE_DEL_USER, HOMEBASE_PERMS
+							HOMEBASE_DEL_SYSTEM, HOMEBASE_DEL_USER, HOMEBASE_PERMS, \
+							STORAGE_FOLDER_PATH, CLUSTER_FOLDER_PATH
+
 from ricci_bridge import getClusterConf
 from ricci_communicator import RicciCommunicator, CERTS_DIR_PATH
 from clusterOS import resolveOSType
@@ -40,7 +42,7 @@
 			if dsResult:
 				errors.append(dsResult)
 			else:
-				messages.append('Removed storage system \"' + i + '\" successfully')
+				messages.append('Removed storage system \"%s\" successfully' % i)
 
 	if '__CLUSTER' in request.form:
 		cluNames = request.form['__CLUSTER']
@@ -51,7 +53,7 @@
 			if dcResult:
 				errors.append(dcResult)
 			else:
-				messages.append('Removed cluster \"' + i + '\" successfully')
+				messages.append('Removed cluster \"%s\" successfully' % i)
 
 	if len(errors) > 0:
 		retCode = False
@@ -129,369 +131,531 @@
 	messages.append('Added new user \"' + user + '\" successfully')
 	return (True, {'messages': messages, 'params': { 'user': user }})
 
-def nodeUnauth(nodeList):
-	for i in nodeList:
-		try:
-			if i['prev_auth'] != True:
-				host = i['host']
-				rc = RicciCommunicator(host)
-				rc.unauth()
-				i['cur_auth'] = False
-		except Exception, e:
-			try:
-				luci_log.debug_verbose('unauth for %s failed: %s' \
-					% (i['host'], str(e)))
-			except:
-				pass
-
-def nodeAuth(cluster, host, passwd):
+def validateAddClusterInitial(self, request):
+	errors = list()
 	messages = list()
-	systemName = host
-	os_str = 'rhel5'
 
 	try:
-		rc = RicciCommunicator(host)
-		if not rc:
-			luci_log.debug_verbose('nodeAuth0: rc is None')
-			raise Exception, 'unknown error'
-	except Exception, e:
-		try:
-			error = 'Ricci connection to %s failed: %s' % (host, str(e))
-		except:
-			error = 'Ricci connection to %s failed' % host
-		luci_log.debug_verbose('nodeAuth1: rc failed: %s' % error)
-
-		return { 'host': host, 'ricci_host': host, 'errors': error, 'cur_auth': False, 'os': os_str }
-
-	if rc.authed():
-		prevAuth = True
-		messages.append('Luci is already authenticated to %s -- not checking password' % host)
-	else:
-		prevAuth = False
-		if not passwd:
-			return { 'host': host, 'ricci_host': systemName, 'prev_auth': False, 'cur_auth': False, 'os': os_str }
-		else:
-			try:
-				rc.auth(passwd)
-			except:
-				pass
+		request.SESSION.delete('add_cluster_initial')
+		request.SESSION.delete('add_cluster')
+	except:
+		pass
 
-	if rc.authed():
-		try:
-			os_str = resolveOSType(rc.os())
-			if not os_str:
-				raise
-		except:
-			os_str = "rhel5"  #Backup plan in case all is almost lost...
+	cur_host = None
+	try:
+		sysData = request.form['__SYSTEM0']
+		if not sysData or len(sysData) < 1:
+			raise Exception, 'no node was given'
+		cur_host = sysData[0]
+	except Exception, e:
+		luci_log.debug_verbose('vACI0: %s' % str(e))
+		return (False, { 'errors': [ 'You must provide the address of at least one node in the cluster you wish to add.' ]})
+
+	cur_entry = { 'host': cur_host }
+	try:
+		if len(sysData) < 2 or not sysData[1]:
+			raise Exception, 'no password'
+		cur_pass = sysData[1]
+		cur_entry['passwd'] = cur_pass
+	except:
+		luci_log.debug_verbose('vACI1: %s: no password given' % cur_host)
+		request.SESSION.set('add_cluster_initial', cur_entry)
+		return (False, { 'errors': [ 'No password was given for %s' % cur_host ] })
 
-		systemName = rc.system_name()
-		if systemName[:9] == 'localhost' or systemName[:5] == '127.0':
-			systemName = host
-		node = { 'host': host, 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': True, 'os': os_str }
+	check_certs = False
+	try:
+		check_certs = request.form.has_key('check_certs')
+	except:
+		check_certs = False
 
-		cluster_info = rc.cluster_info()
-		if cluster and ((not cluster_info) or (cluster_info[0] != cluster)):
-			node['errors'] = 'Node \"' + host + '\" is reporting it is not a member of cluster \"' + cluster + '\"'
-			if cluster_info and cluster_info[0]:
-				node['errors'] += ' and that it is a member of cluster \"' + cluster_info[0] + '\"'
-		if not cluster and cluster_info and cluster_info[0]:
-			node['errors'] = 'Node \"' + host + '\" reports it is a member of cluster \"' + cluster_info[0] + '\"'
-		return node
+	cur_host_trusted = False
+	try:
+		cur_host_trusted = request.form.has_key('host_is_trusted')
+	except:
+		cur_host_trusted = False
 
-	error = 'Unable to authenticate to the ricci agent on \"' + host + '\"'
-	return { 'host': host, 'ricci_host': systemName, 'prev_auth': False , 'cur_auth': False, 'errors': error, 'os': os_str }
+	cur_host_fp = None
+	try:
+		cur_host_fp = request.form['host_fingerprint'].strip()
+		if not cur_host_fp:
+			cur_host_fp = None
+	except:
+		cur_host_fp = None
 
-def validateAddClusterInitial(self, request, must_complete=True):
-	errors = list()
-	messages = list()
-	newNodeList = list()
-	nodeHash = {}
-	rnodeHash = {}
+	try:
+		rc = RicciCommunicator(cur_host)
+		if not rc:
+			raise Exception, 'rc is None'
+		cur_fp = rc.fingerprint()
+		if cur_host_fp is not None:
+			cur_entry['fp'] = cur_host_fp
+		else:
+			cur_entry['fp'] = cur_fp[1]
+	except Exception, e:
+		luci_log.debug_verbose('vACI2: %s: %s' % (cur_host, str(e)))
+		request.SESSION.set('add_cluster_initial', cur_entry)
+		return (False, { 'errors': [ 'Unable to establish a secure connection to the ricci agent on %s: %s' \
+			% (cur_host, str(e)) ] })
+
+	if not check_certs or cur_host_trusted:
+		try:
+			if cur_host_fp is not None and cur_host_fp != cur_fp[1]:
+				errmsg = 'The key fingerprint for %s has changed from under us. It was \"%s\" and is now \"%s\".' \
+					% (cur_host, cur_host_fp, cur_fp[1])
+				request.SESSION.set('add_cluster_initial', cur_entry)
+				luci_log.info('SECURITY: %s' % errmsg)
+				return (False, { 'errors': [ errmsg ] })
+			rc.trust()
+		except Exception, e:
+			luci_log.debug_verbose('vACI3: %s %s' % (cur_host, str(e)))
+			request.SESSION.set('add_cluster_initial', cur_entry)
+			return (False, { 'errors': [ 'Unable to establish trust for host %s: %s' % (cur_host, str(e)) ] })
+	elif check_certs:
+		if not rc.trusted():
+			msg = '%s has %s fingerprint %s' \
+				% (cur_host, cur_fp[0], cur_fp[1])
+		else:
+			cur_host_trusted = True
+			cur_entry['trusted'] = True
+			msg = 'Host %s %s fingerprint %s is already trusted.' \
+				% (cur_host, cur_fp[0], cur_fp[1])
+		request.SESSION.set('add_cluster_initial', cur_entry)
+		messages.append(msg)
+		return (True, { 'messages': [ msg ] })
 
 	try:
-		sysData = request.form['__SYSTEM0']
-		if not sysData or len(sysData) < 2:
-			raise
+		del rc
+		request.SESSION.delete('add_cluster_initial')
 	except:
-		return (False, { 'errors': [ 'At least one system and its root password must be given' ] })
+		pass
 
 	try:
-		rc = RicciCommunicator(sysData[0])
+		rc = RicciCommunicator(cur_host, enforce_trust=True)
 		if not rc:
-			raise Exception, 'unknown error'
+			raise Exception, 'rc is None'
+		cur_entry['trusted'] = rc.trusted()
 	except Exception, e:
-		return (False, { 'errors': [ 'Unable to establish a connection to the Ricci agent on %s: %s' % (sysData[0], str(e)) ] })
+		luci_log.debug_verbose('vACI4: %s %s' % (cur_host, str(e)))
+		request.SESSION.set('add_cluster_initial', cur_entry)
+		return (False, { 'errors': [ 'Unable to connect to the ricci agent on %s' % cur_host ] })
 
-	prevAuth = 0
-	if not rc.authed():
+	prev_auth = rc.authed()
+	if not prev_auth:
 		try:
-			rc.auth(sysData[1])
-		except: pass
-		if not rc.authed():
-			return (False, { 'errors': [ 'Unable to authenticate to the Ricci agent on \"' + sysData[0] + '\"' ] })
-	else:
-		prevAuth = 1
+			rc.auth(cur_pass)
+			if not rc.authed():
+				raise Exception, 'authentication failed'
+		except Exception, e:
+			errmsg = 'Unable to authenticate to the ricci agent on %s: %s' % (cur_host, str(e))
+			luci_log.debug_verbose('vACI5: %s: %s' % (cur_host, str(e)))
+			request.SESSION.set('add_cluster_initial', cur_entry)
+			return (False, { 'errors': [ errmsg ] })
+
+	del cur_entry
 
 	try:
 		cluster_info = rc.cluster_info()
 	except:
 		cluster_info = None
 
-	os_str = resolveOSType(rc.os())
-	if not os_str:
-		os_str = "rhel5"  #Backup plan in case all is almost lost...
-
 	if not cluster_info or not cluster_info[0]:
-		if not prevAuth:
-			rc.unauth()
+		if not prev_auth:
+			try:
+				rc.unauth()
+			except:
+				pass
+
 		if not cluster_info:
-			errmsg = 'An error occurred while attempting to retrieve the cluster.conf file for \"' + sysData[0] + '\"'
+			errmsg = 'An error occurred while attempting to retrieve the cluster.conf file from \"%s\"' % cur_host
 		else:
-			errmsg = '\"' + sysData[0] + '\" is not a member of a cluster'
+			errmsg = '\"%s\" reports it is not a member of any cluster.' % cur_host
 		return (False, { 'errors': [ errmsg ] })
 
-	clusterName = cluster_info[0]
-	cluConf = getClusterConf(rc)
-	if cluConf:
-		nodeList = getClusterConfNodes(cluConf)
-
-	if not cluConf or not nodeList or len(nodeList) < 1:
-		if not prevAuth:
-			rc.unauth()
-		return (False, { 'errors': [ 'Error retrieving member nodes for cluster \"' + clusterName + '\"' ] })
-
-	systemName = rc.system_name()
-	if systemName[:9] == 'localhost':
-		systemName = sysData[0]
-
-	node = { 'host': rc.hostname(), 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': rc.authed(), 'os': os_str }
-	nodeHash[sysData[0]] = node
-	rnodeHash[systemName] = node
-	newNodeList.append(node)
+	cluster_name = cluster_info[0]
+	cluster_os = resolveOSType(rc.os())
+	try:
+		cluster_conf = getClusterConf(rc)
+	except:
+		cluster_conf = None
 
-	if 'allSameCheckBox' in request.form:
-		passwd = sysData[1]
-	else:
-		passwd = None
-		
-	for i in nodeList:
-		node = nodeAuth(clusterName, i, passwd)
-		if 'messages' in node:
-			messages.extend(node['messages'])
-		if node['host'] in nodeHash or node['ricci_host'] in rnodeHash:
-			continue
-		nodeHash[node['host']] = node
-		if 'ricci_host' in node:
-			rnodeHash[node['ricci_host']] = node
-
-		if 'errors' in node:
-			errors.append(node['errors'])
-			node['errors'] = True
-		newNodeList.append(node)
-
-	sfn = lambda x, y: \
-		x['cur_auth'] - y['cur_auth'] or (('errors' in y) - ('errors' in x)) 
-	newNodeList.sort(sfn)
+	if cluster_conf:
+		try:
+			node_list = getClusterConfNodes(cluster_conf)
+		except:
+			node_list = None
 
-	if must_complete == True:
-		dfn = lambda x: not 'cur_auth' in x or x['cur_auth'] != True
-	else:
-		dfn = lambda x: False
+	# Make sure a cluster with this name is not already managed before
+	# going any further.
+	try:
+		dummy = self.restrictedTraverse(CLUSTER_FOLDER_PATH + cluster_name)
+		if not dummy:
+			raise Exception, 'no existing cluster'
+		errors.append('A cluster named \"%s\" is already managed.' % cluster_name)
+		if not prev_auth:
+			try:
+				rc.unauth()
+			except:
+				pass
+		return (False, { 'errors': errors })
+	except:
+		pass
 
-	cluster_properties = {
-		'clusterName': clusterName,
-		'nodeList': newNodeList,
-		'nodeHash': nodeHash,
-		'rnodeHash': rnodeHash,
-		'isComplete': len(filter(dfn, newNodeList)) == 0
-	}
+	if not cluster_conf or not node_list or len(node_list) < 1:
+		if not prev_auth:
+			try:
+				rc.unauth()
+			except:
+				pass
+		return (False, { 'errors': [ 'Error retrieving the nodes list for cluster \"%s\" from node \"%s\"' % (cluster_name, cur_host) ] })
 
-	if len(errors) < len(nodeList):
-		cluster_properties['redirect'] = HOMEBASE_ADD_CLUSTER
+	same_node_passwds = False
+	try:
+		same_node_passwds = 'allSameCheckBox' in request.form
+	except:
+		same_node_passwds = False
 
-	return (len(errors) < 1,
-		{'messages': messages, 'errors': errors, 'requestResults': cluster_properties })
+	add_cluster = { 'name': cluster_name,
+					'nodes': {},
+					'cluster_os':cluster_os,
+					'pass': 0,
+					'identical_passwds': same_node_passwds,
+					'check_certs': check_certs }
+
+	for i in node_list:
+		cur_node = { 'host': i }
+		if same_node_passwds:
+			cur_node['passwd'] = cur_pass
+		add_cluster['nodes'][i] = cur_node
+	request.SESSION.set('add_cluster', add_cluster)
+	request.response.redirect('/luci/homebase/index_html?pagetype=%s' % HOMEBASE_ADD_CLUSTER)
 
-def validateAddCluster(self, request, must_complete=True):
+def parseHostForm(request, check_certs):
 	errors = list()
 	messages = list()
-	requestResults = None
-	nodeList = None
+	system_list = {}
 
 	try:
-		sessionData = request.SESSION.get('checkRet')
-		requestResults = sessionData['requestResults']
+		num_storage = int(request.form['numStorage'].strip())
 	except Exception, e:
-		luci_log.debug_verbose('VAC0: error getting session obj: %s' % str(e))
+		luci_log.debug_verbose('PHF1: numStorage field missing: %s' % str(e))
+		errors.append('The number of systems entered could not be determined.')
+		num_storage = 0
+
+	incomplete = False
+	i = 0
+	while i < num_storage:
 		try:
-			clusterName = request.form['clusterName']
+			sysData = request.form['__SYSTEM%d' % i]
+			if len(sysData) < 1 or not sysData[0]:
+				raise Exception, 'no hostname'
+			cur_host = sysData[0]
+			if cur_host in system_list:
+				errors.append('You have added \"%s\" more than once.' % cur_host)
+				raise Exception, '%s added more than once' % cur_host
 		except:
-			clusterName = ''
+			i += 1
+			continue
+
+		cur_system = { 'host': cur_host }
+
+		if len(sysData) < 2 or not sysData[1]:
+			errors.append('No password for %s (entry %d).' % (cur_host, i))
+			cur_passwd = None
+		else:
+			cur_passwd = sysData[1]
+			cur_system['passwd'] = cur_passwd
 
 		try:
-			nodeList = requestResults['nodeList']
-			luci_log.debug_verbose('VAC1: unauth to node list')
-			nodeUnauth(nodeList)
+			cur_fp = request.form['__SYSTEM%dFingerprint' % i].strip()
+			if not cur_fp:
+				raise Exception, 'fingerprint is blank'
+			cur_system['fp'] = cur_fp
 		except:
-			pass
+			cur_fp = None
+
+		try:
+			cur_set_trust = request.form.has_key('__SYSTEM%dTrusted' % i)
+		except:
+			cur_set_trust = False
+
+		if check_certs or (cur_fp is not None and cur_set_trust is True):
+			try:
+				rc = RicciCommunicator(cur_host, enforce_trust=False)
+				if not rc:
+					raise Exception, 'rc is None'
+				cur_system['prev_auth'] = rc.authed()
+				fp = rc.fingerprint()
+
+				if cur_set_trust is True:
+					cur_system['fp'] = cur_fp
+					if cur_fp != fp[1]:
+						errmsg = 'The key fingerprint for %s has changed from under us. It was \"%s\" and is now \"%s\".' % (cur_host, cur_fp, fp[1])
+						errors.append(errmsg)
+						luci_log.info('SECURITY: %s' % errmsg)
+						cur_system['error'] = True
+						incomplete = True
+					else:
+						rc.trust()
+						cur_system['trusted'] = True
+				else:
+					cur_system['fp'] = fp[1]
+
+				if not rc.trusted():
+					incomplete = True
+					msg = '%s has %s fingerprint %s' % (cur_host, fp[0], fp[1])
+				else:
+					cur_system['trusted'] = True
+					msg = '%s %s fingerprint %s is already trusted.' % (cur_host, fp[0], fp[1])
+
+				if check_certs:
+					messages.append(msg)
+			except Exception, e:
+				cur_system['error'] = True
+				try:
+					del cur_system['trusted']
+				except:
+					pass
+				errors.append('Unable to retrieve the SSL fingerprint for node %s: %s' % (cur_host, str(e)))
+				luci_log.debug_verbose('PHF2: %s: %s' \
+					% (cur_host, str(e)))
+		else:
+			# The user doesn't care. Trust the system.
+			try:
+				rc = RicciCommunicator(cur_host)
+				if not rc:
+					raise Exception, 'rc is None'
+				rc.trust()
+				cur_system['trusted'] = True
+				cur_system['prev_auth'] = rc.authed()
+			except Exception, e:
+				incomplete = True
+				cur_system['error'] = True
+				try:
+					if not 'prev_auth' in cur_system:
+						del cur_system['trusted']
+						rc.untrust()
+				except:
+					pass		
+				errors.append('Unable to add the key for node %s to the trusted keys list.' % cur_host)
+				luci_log.debug_verbose('PHF3: %s: %s' % (cur_host, str(e)))
+		system_list[cur_host] = cur_system
+		i += 1
+
+	return system_list, incomplete, errors, messages
+
+def validateAddCluster(self, request):
+	errors = list()
 
-		return (False, { 'errors': [ 'A data integrity error has occurred. Please attempt adding the cluster again.' ], 'requestResults': { 'clusterName': clusterName, 'isComplete': False, 'nodeList': [], 'redirect': HOMEBASE_ADD_CLUSTER_INITIAL } })
-		
 	try:
-		clusterName = request.form['clusterName']
-		if not clusterName:
-			raise Exception, 'no cluster name was found'
-	except Exception, e:
-		luci_log.debug_verbose('VAC2: no cluser name found: %s', str(e))
-		return (False, { 'errors': ['No cluster name was given.'], 'requestResults': requestResults })
+		request.SESSION.delete('add_cluster')
+		request.SESSION.delete('add_cluster_initial')
+	except:
+		pass
 
 	try:
-		nodeList = requestResults['nodeList']
-		if not nodeList or len(nodeList) < 1:
-			raise Exception, 'no node list found'
-	except Exception, e:
-		luci_log.debug_verbose('VAC3: no nodeList found: %s', str(e))
-		return (False, { 'errors': ['No cluster nodes were given.'], 'requestResults': requestResults })
+		cluster_name = request.form['clusterName'].strip()
+	except:
+		luci_log.debug_verbose('VAC0: no cluster name')
+		errors.append('No cluster name was given.')
 
 	try:
-		nodeHash = requestResults['nodeHash']
+		cluster_os = request.form['cluster_os'].strip()
 	except:
-		nodeHash = {}
+		luci_log.debug_verbose('VAC1: no cluster os')
+		errors.append('Unable to determine the version of cluster %s.' % cluster_name)
 
+	check_certs = False
 	try:
-		rnodeHash = requestResults['rnodeHash']
+		check_certs = 'check_certs' in request.form
 	except:
-		rnodeHash = {}
+		check_certs = False
 
-	# This should never fail
 	try:
-		numStorage = int(request.form['numStorage'])
-		if numStorage != len(nodeList):
-			raise Exception, 'numstorage != len(nodelist)'
-	except Exception, e:
+		pass_num = int(request.form['pass'].strip()) + 1
+	except:
+		pass_num = 1
+
+	same_node_passwds = False
+	try:
+		same_node_passwds = 'allSameCheckBox' in request.form
+	except:
+		same_node_passwds = False
+
+	add_cluster = { 'name': cluster_name,
+					'pass': pass_num,
+					'cluster_os': cluster_os,
+					'identical_passwds': same_node_passwds,
+					'check_certs': check_certs }
+
+	system_list, incomplete, new_errors, messages = parseHostForm(request, check_certs)
+	errors.extend(new_errors)
+	add_cluster['nodes'] = system_list
+
+	for i in system_list:
+		cur_system = system_list[i]
+
+		cur_host_trusted = 'trusted' in cur_system
+		cur_host = cur_system['host']
+		prev_auth = False
 		try:
-			requestResults['isComplete'] = False
-			luci_log.debug_verbose('VAC4: error: %s' % str(e))
+			cur_passwd = cur_system['passwd']
 		except:
-			pass
+			cur_passwd = None
 
-		nodeUnauth(nodeList)
-		return (False, {
-				'errors': [ 'Unknown number of nodes entered' ],
-				'requestResults': requestResults })
+		if (cur_host_trusted or not check_certs) and cur_passwd:
+			try:
+				rc = RicciCommunicator(cur_host, enforce_trust=False)
+				prev_auth = rc.authed()
+			except Exception, e:
+				errors.append('Unable to connect to the ricci agent on %s: %s' \
+					% (cur_host, str(e)))
+				incomplete = True
+				cur_system['errors'] = True
+				luci_log.debug_verbose('VAC2: %s: %s' % (cur_host, str(e)))
+				continue
 
-	i = 0
-	while i < numStorage:
-		sysData = request.form['__SYSTEM' + str(i)]
-		if not sysData:
-			i += 1
-			continue
+			try:
+				rc.auth(cur_passwd)
+				if not rc.authed():
+					raise Exception, 'authentication failed'
+			except Exception, e:
+				errors.append('Unable to authenticate to the ricci agent on %s: %s' \
+					% (cur_host, str(e)))
+				incomplete = True
+				cur_system['errors'] = True
+				luci_log.debug_verbose('VAC3: %s: %s' % (cur_host, str(e)))
+				continue
 
-		oldNode = None
-		node = nodeAuth(clusterName, sysData[0], sysData[1])
-		if node['host'] in nodeHash:
-			oldNode = nodeHash[node['host']]
-		elif 'ricci_host' in node and node['ricci_host'] in rnodeHash:
-			oldNode = rnodeHash[node['ricci_host']]
-		elif not oldNode:
-			for k in nodeHash.keys():
-				if node['host'][:len(k) + 1] == k + '.':
-					oldNode = nodeHash[k]
-		elif not oldNode:
-			for k in rnodeHash.keys():
-				if node['host'][:len(k) + 1] == k + '.':
-					oldNode = rnodeHash[k]
-
-		if not oldNode:
-			luci_log.debug_verbose('VAC5: node %s not found', sysData[0])
-			nodeUnauth(nodeList)
-			return (False, { 'errors': [ 'A data integrity error has occurred. Please attempt adding the cluster again.' ], 'requestResults': { 'clusterName': clusterName, 'nodeList': nodeList, 'isComplete': False, 'redirect': HOMEBASE_ADD_CLUSTER_INITIAL } })
-
-		if oldNode['host'] != node['host']:
-			del nodeHash[oldNode['host']]
-			oldNode['host'] = node['host']
-			nodeHash[node['host']] = oldNode
-
-		if 'ricci_host' in node and (not 'ricci_host' in oldNode or node['ricci_host'] != oldNode['ricci_host']):
-			if oldNode['ricci_host'] in rnodeHash:
-				del rnodeHash[oldNode['ricci_host']]
-				oldNode['ricci_host'] = node['ricci_host']
-				rnodeHash[node['ricci_host']] = oldNode
-
-		oldNode['cur_auth'] = node['cur_auth']
-		if 'errors' in node:
-			errors.append(node['errors'])
-			oldNode['errors'] = True
-		i += 1
+			cluster_info = rc.cluster_info()
+			if cluster_info[0] != cluster_name and cluster_info[1] != cluster_name:
+				incomplete = True
+				cur_system['errors'] = True
+
+				if cluster_info[0]:
+					cur_cluster_name = cluster_info[0]
+				else:
+					cur_cluster_name = cluster_info[1]
+
+				if cur_cluster_name:
+					err_msg = 'Node %s reports it is in cluster \"%s\" and we expect \"%s\"' \
+						% (cur_host, cur_cluster_name, cluster_name)
+				else:
+					err_msg = 'Node %s reports it is not a member of any cluster' % cur_host
+
+				if not prev_auth:
+					try:
+						rc.unauth()
+					except Exception, e:
+						luci_log.debug_verbose('VAC4: %s: %s' % (cur_host, str(e)))
 
-	if must_complete == True:
-		dfn = lambda x: not 'cur_auth' in x or x['cur_auth'] != True
-	else:
-		dfn = lambda x: False
+				errors.append(err_msg)
+				luci_log.debug_verbose('VAC5: %s' % err_msg)
+				continue
 
-	clusterComplete = len(filter(dfn, nodeList)) == 0
+			cur_os = resolveOSType(rc.os())
+			if cur_os != cluster_os:
+				incomplete = True
+				cur_system['errors'] = True
+
+				if not prev_auth:
+					try:
+						rc.unauth()
+					except Exception, e:
+						luci_log.debug_verbose('VAC6: %s: %s' % (cur_host, str(e)))
 
-	if clusterComplete:
-		err = manageCluster(self, clusterName, nodeList)
-		if err:
-			errors.append(err)
+				err_msg = 'Node %s reports its cluster version is %s and we expect %s' \
+					% (cur_host, cur_os, cluster_os)
+
+				errors.append(err_msg)
+				luci_log.debug_verbose('VAC7: %s' % err_msg)
+				continue
 		else:
-			messages.append('Cluster \"' + clusterName + '\" has been added to the Luci management interface.') 
-	else:
-		sfn = lambda x, y: \
-			x['cur_auth'] - y['cur_auth'] or (('errors' in y) - ('errors' in x)) 
-		nodeList.sort(sfn)
-
-	ret = { 'messages': messages, 'errors': errors }
-
-	if len(errors) > 0 or not clusterComplete:
-		ret['requestResults'] = {
-			'clusterName': clusterName,
-			'nodeList': nodeList,
-			'nodeHash': nodeHash,
-			'rnodeHash': rnodeHash,
-			'isComplete': clusterComplete
-		}
-	else:
-		ret['requestResults'] = {
-			'redirect': HOMEBASE_ADD_CLUSTER_INITIAL,
-			'clusterName': clusterName,
-			'isComplete': True
-		}
+			incomplete = True
 
-	return (len(errors) < 1, ret)
+	if len(errors) > 0:
+		incomplete = True
+
+	if not incomplete or request.form.has_key('asis'):
+		err_msg = manageCluster(self, cluster_name, system_list, cluster_os)
+		if err_msg:
+			incomplete = True
+			errors.append('An error occurred while creating the database objects for cluster %s: %s' \
+				% (cluster_name, err_msg))
+			luci_log.debug_verbose('VAC7: error adding cluster DB objects for %s: %s' \
+				% (cluster_name, err_msg))
+		else:
+			messages.append('Cluster %s is now managed by Luci' % cluster_name)
+			incomplete = False
+
+	if incomplete:
+		add_cluster['incomplete'] = True
+		request.SESSION.set('add_cluster', add_cluster)
+		return_code = False
+	else:
+		return_code = True
+	
+	return (return_code, {'errors': errors, 'messages': messages })
 
 def validateAddSystem(self, request):
-	errors = list()
-	messages = list()
+	try:
+		request.SESSION.delete('add_systems')
+	except:
+		pass
 
+	check_certs = False
 	try:
-		numStorage = request.form['numStorage']
+		check_certs = 'check_certs' in request.form
 	except:
-		return (False, { 'errors': ['Unknown number of systems entered'] })
+		check_certs = False
+
+	add_systems, incomplete, errors, messages = parseHostForm(request, check_certs)
+	delete_keys = list()
+	for i in add_systems:
+		cur_system = add_systems[i]
+
+		cur_host_trusted = 'trusted' in cur_system
+		cur_host = cur_system['host']
 
-	i = 0
-	while i < numStorage:
 		try:
-			sysData = request.form['__SYSTEM' + str(i)]
+			cur_passwd = cur_system['passwd']
 		except:
-			break
-
-		if len(sysData) == 2 and sysData[0] != '' and sysData[1] != '':
-			csResult = createSystem(self, sysData[0], sysData[1])
+			cur_passwd = None
 
+		if (cur_host_trusted or not check_certs) and cur_passwd:
+			csResult = createSystem(self, cur_host, cur_passwd)
 			if csResult:
+				incomplete = True
+				cur_system['error'] = True
 				errors.append(csResult)
 			else:
-				messages.append('Added storage system \"' + sysData[0] + '\" successfully')
-		i += 1
+				delete_keys.append(i)
+				messages.append('Added storage system \"%s\" successfully' \
+					% cur_host)
+
+	for i in delete_keys:
+		try:
+			del add_systems[i]
+		except:
+			pass
 
 	if len(errors) > 0:
-		returnCode = False
+		return_code = False
 	else:
-		returnCode = True
+		return_code = True
+
+	if incomplete:
+		try:
+			request.SESSION.set('add_systems', add_systems)
+		except Exception, e:
+			luci_log.debug_verbose('validateSA2: %s' % str(e))
+		return_code = False
+	else:
+		try:
+			request.SESSION.delete('add_systems')
+			del add_systems
+		except:
+			pass
 
-	return (returnCode, {'errors': errors, 'messages': messages})
+	return (return_code, { 'errors': errors, 'messages': messages})
 
 def validatePerms(self, request):
 	userId = None
@@ -599,62 +763,99 @@
 	return (returnCode, {'errors': errors, 'messages': messages, 'params': {'user': userId }})
 
 def validateAuthenticate(self, request):
-	errors = list()
-	messages = list()
+	try:
+		request.SESSION.delete('auth_systems')
+	except:
+		pass
 
+	check_certs = False
 	try:
-		numStorage = int(request.form['numStorage'])
+		check_certs = 'check_certs' in request.form
 	except:
-		return (False, {'errors': [ 'Unknown number of nodes entered']})
+		check_certs = False
 
-	i = 0
-	while i < numStorage:
-		sysData = request.form['__SYSTEM' + str(i)]
-		if not sysData or len(sysData) < 2 or not sysData[0] or not sysData[1]:
-			i += 1
-			continue
+	system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
+	delete_keys = list()
+	for i in system_list:
+		cur_system = system_list[i]
 
-		host = str(sysData[0])
-		passwd = str(sysData[1])
+		cur_host_trusted = 'trusted' in cur_system
+		cur_host = cur_system['host']
 
 		try:
-			rc = RicciCommunicator(sysData[0])
-			if rc is None:
-				raise Exception, 'unknown error'
-		except Exception, e:
-			errors.append('Unable to contact the ricci agent for %s: %s' \
-				% (sysData[0], str(e)))
-			i += 1
-			continue
+			cur_passwd = cur_system['passwd']
+		except:
+			cur_passwd = None
 
-		if rc.authed():
-			messages.append(host + ' is already authenticated.')
-		else:
+		if (cur_host_trusted or not check_certs) and cur_passwd:
 			try:
-				rc.auth(passwd)
-			except:
-				errors.append('Error authenticating to the ricci agent on ' + host)
-				i += 1
+				rc = RicciCommunicator(cur_host, enforce_trust=True)
+				if not rc:
+					raise Exception, 'connection failed'
+			except Exception, e:
+				luci_log.debug_verbose('validateAuth0: %s: %s' % (cur_host, str(e)))
+				errors.append('Unable to communicate with the ricci agent on %s: %s' \
+					% (cur_host, str(e)))
+				incomplete = True
+				cur_system['error'] = True
 				continue
 
-			if not rc.authed():
-				errors.append('Error authenticating to the ricci agent on ' + host)
-			else:
-				messages.append(host + ' was successfully authenticated.')
-
-			if rc.authed():
+			try:
+				if rc.authed():
+					messages.append('%s is already authenticated.' % cur_host)
+				else:
+					rc.auth(cur_passwd)
+					if not rc.authed():
+						raise Exception, 'authentication failed'
+					messages.append('Authenticated to %s successfully' \
+						% cur_host)
+				delete_keys.append(i)
 				try:
-					delNodeFlag(self, getStorageNode(self, host), CLUSTER_NODE_NEED_AUTH)
+					delNodeFlag(self, getStorageNode(self, cur_host), CLUSTER_NODE_NEED_AUTH)
 				except:
 					pass
 
 				try:
-					delNodeFlag(self, getClusterNode(self, host, rc.cluster_info()[0]), CLUSTER_NODE_NEED_AUTH)
+					delNodeFlag(self, getClusterNode(self, cur_host, rc.cluster_info()[0]), CLUSTER_NODE_NEED_AUTH)
 				except:
 					pass
-		i += 1
-			
-	return (len(errors) > 0, {'errors': errors, 'messages': messages })
+			except Exception, e:
+				errors.append('Unable to authenticate to %s: %s' % (cur_host, str(e)))
+				luci_log.debug_verbose('validateAuth1: %s: %s' % (cur_host, str(e)))
+				incomplete = True
+				cur_system['error'] = True
+
+	for i in delete_keys:
+		try:
+			del system_list[i]
+		except:
+			pass
+
+	if len(errors) > 0:
+		return_code = False
+	else:
+		return_code = True
+
+	if incomplete:
+		try:
+			request.SESSION.set('auth_systems', system_list)
+		except Exception, e:
+			luci_log.debug_verbose('validateAuthenticate2: %s' % str(e))
+		return_code = False
+	else:
+		try:
+			request.SESSION.delete('auth_systems')
+			del auth_systems
+		except:
+			pass
+
+	auth_msgs = {}
+	if len(errors) > 0:
+		auth_msgs['errors'] = errors
+	if len(messages) > 0:
+		auth_msgs['messages'] = messages
+	request.SESSION.set('auth_status', auth_msgs)
+	request.response.redirect('/luci/homebase/index_html?pagetype=5')
 
 formValidators = [
 	validateAddUser,
@@ -690,11 +891,6 @@
 	return False
 
 def homebaseControlPost(self, request):
-	try:
-		sessionData = request.SESSION.get('checkRet')
-	except:
-		sessionData = None
-
 	if 'ACTUAL_URL' in request:
 		url = request['ACTUAL_URL']
 	else:
@@ -718,24 +914,14 @@
 			pass
 		return homebasePortal(self, request, '.', '0')
 
-	if validatorFn == validateAddClusterInitial or validatorFn == validateAddCluster:
-		ret = validatorFn(self, request, must_complete=False)
-	else:
-		ret = validatorFn(self, request)
+	ret = validatorFn(self, request)
 	params = None
 
-	if 'params' in ret[1]:
-		params = ret[1]['params']
-
-	if 'requestResults' in ret[1]:
-		requestResults = ret[1]['requestResults']
+	if ret and len(ret) > 1 and ret[1]:
+		if 'params' in ret[1]:
+			params = ret[1]['params']
+		request.SESSION.set('checkRet', ret[1])
 
-		if 'redirect' in requestResults:
-			pagetype = requestResults['redirect']
-			request['pagetype'] = pagetype
-			request.form['pagetype'] = pagetype
-
-	request.SESSION.set('checkRet', ret[1])
 	return homebasePortal(self, request, url, pagetype, params)
 
 def homebaseControl(self, request):
@@ -789,33 +975,12 @@
 
 	# Initial add cluster page
 	try:
-		if pagetype == HOMEBASE_ADD_CLUSTER:
-			raise
 		if havePermAddCluster(self):
 			addCluster = {}
 			addCluster['Title'] = 'Add an Existing Cluster'
 			addCluster['absolute_url'] = url + '?pagetype=' + HOMEBASE_ADD_CLUSTER_INITIAL
 			addCluster['Description'] = 'Add an existing cluster to the Luci cluster management interface.'
-			if pagetype == HOMEBASE_ADD_CLUSTER_INITIAL:
-				addCluster['currentItem'] = True
-				ret['curIndex'] = index
-				cur = addCluster
-			else:
-				addCluster['currentItem'] = False
-			index += 1
-			temp.append(addCluster)
-	except: pass
-
-	# Add cluster - screen 2
-	try:
-		if pagetype != HOMEBASE_ADD_CLUSTER:
-			raise
-		if havePermAddCluster(self):
-			addCluster = {}
-			addCluster['Title'] = 'Add an Existing Cluster'
-			addCluster['absolute_url'] = url + '?pagetype=' + HOMEBASE_ADD_CLUSTER
-			addCluster['Description'] = 'Add an existing cluster to the Luci cluster management interface.'
-			if pagetype == HOMEBASE_ADD_CLUSTER:
+			if pagetype == HOMEBASE_ADD_CLUSTER_INITIAL or pagetype == HOMEBASE_ADD_CLUSTER:
 				addCluster['currentItem'] = True
 				ret['curIndex'] = index
 				cur = addCluster
@@ -923,7 +1088,7 @@
 def getClusterSystems(self, clusterName):
 	if isAdmin(self):
 		try:
-			return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
+			return self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName + '/objectItems')('Folder')
 		except Exception, e:
 			luci_log.debug_verbose('GCSy0: %s: %s' % (clusterName, str(e)))
 			return None
@@ -937,7 +1102,7 @@
 		return None
 
 	try:
-		csystems = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
+		csystems = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName + '/objectItems')('Folder')
 		if not csystems or len(csystems) < 1:
 			return None
 	except Exception, e:
@@ -1024,20 +1189,19 @@
 
 def createSystem(self, host, passwd):
 	try:
-		exists = self.restrictedTraverse(PLONE_ROOT +'/systems/storage/' + host)
+		dummy = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
 		luci_log.debug_verbose('CS0: %s already exists' % host)
 		return 'Storage system %s is already managed' % host
 	except:
 		pass
 
 	try:
-		rc = RicciCommunicator(host)
+		rc = RicciCommunicator(host, enforce_trust=True)
 		if rc is None:
-			raise Exception, 'unknown error'
+			raise Exception, 'rc is None'
 	except Exception, e:
 		luci_log.debug_verbose('CS1: %s: %s' % (host, str(e)))
-		return 'Unable to establish a connection to the ricci agent on %s: %s' \
-			% (host, str(e))
+		return 'Unable to establish a secure connection to the ricci agent on %s: %s' % (host, str(e))
 
 	try:
 		if not rc.authed():
@@ -1056,21 +1220,21 @@
 		return 'Authentication for storage system %s failed' % host
 
 	try:
-		exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+		dummy = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
 		luci_log.debug_verbose('CS4 %s already exists' % host)
 		return 'Storage system %s is already managed' % host
 	except:
 		pass
 
 	try:
-		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
 	except Exception, e:
 		luci_log.debug_verbose('CS5 %s: %s' % (host, str(e)))
 		return 'Unable to create storage system %s: %s' % host
 
 	try:
 		ssystem.manage_addFolder(host, '__luci__:system')
-		newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+		newSystem = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
 	except Exception, e:
 		luci_log.debug_verbose('CS6 %s: %s' % (host, str(e)))
 		return 'Unable to create DB entry for storage system %s' % host
@@ -1085,28 +1249,22 @@
 	return None
 
 def abortManageCluster(self, request):
-	try:
-		sessionData = request.SESSION.get('checkRet')
-		nodeUnauth(sessionData['requestResults']['nodeList'])
-	except Exception, e:
-		luci_log.debug_verbose('AMC0: %s' % str(e))
+	pass
 
-def manageCluster(self, clusterName, nodeList):
+def manageCluster(self, clusterName, node_list, cluster_os):
 	clusterName = str(clusterName)
 
 	try:
-		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/')
+		clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
 		if not clusters:
 			raise Exception, 'cannot find the cluster entry in the DB'
 	except Exception, e:
-		nodeUnauth(nodeList)
 		luci_log.debug_verbose('MC0: %s: %s' % (clusterName, str(e)))
 		return 'Unable to create cluster %s: the cluster directory is missing.' % clusterName
 
 	try:
-		newCluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		newCluster = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
 		if newCluster:
-			nodeUnauth(nodeList)
 			luci_log.debug_verbose('MC1: cluster %s: already exists' % clusterName)
 			return 'A cluster named %s is already managed by Luci' % clusterName
 	except:
@@ -1114,11 +1272,10 @@
 
 	try:
 		clusters.manage_addFolder(clusterName, '__luci__:cluster')
-		newCluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		newCluster = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
 		if not newCluster:
 			raise Exception, 'unable to create the cluster DB entry for %s' % clusterName
 	except Exception, e:
-		nodeUnauth(nodeList)
 		luci_log.debug_verbose('MC2: %s: %s' % (clusterName, str(e)))
 		return 'Unable to create cluster %s: %s' % (clusterName, str(e))
 
@@ -1127,7 +1284,6 @@
 		newCluster.manage_role('View', ['Access Contents Information', 'View'])
 	except Exception, e:
 		luci_log.debug_verbose('MC3: %s: %s' % (clusterName, str(e)))
-		nodeUnauth(nodeList)
 		try:
 			clusters.manage_delObjects([clusterName])
 		except Exception, e:
@@ -1135,34 +1291,22 @@
 		return 'Unable to set permissions on new cluster: %s: %s' % (clusterName, str(e))
 
 	try:
-		cluster_os = nodeList[0]['os']
-		if not cluster_os:
-			raise KeyError, 'Cluster OS is blank'
-	except KeyError, e:
-		luci_log.debug_verbose('MC5: %s: %s' % (clusterName, str(e)))
-		cluster_os = 'rhel5'
-
-	try:
 		newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
 	except Exception, e:
 		luci_log.debug_verbose('MC5: %s: %s: %s' \
 			% (clusterName, cluster_os, str(e)))
 
-	for i in nodeList:
-		#if 'ricci_host' in i:
-		#	host = str(i['ricci_host'])
-		#else:
-		host = str(i['host'])
+	for i in node_list:
+		host = node_list[i]['host']
 
 		try:
 			newCluster.manage_addFolder(host, '__luci__:csystem:' + clusterName)
-			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
+			newSystem = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH + clusterName + '/' + host))
 			if not newSystem:
 				raise Exception, 'unable to create cluster system DB entry for node %s' % host
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
-			nodeUnauth(nodeList)
 			try:
 				clusters.manage_delObjects([clusterName])
 			except Exception, e:
@@ -1175,7 +1319,7 @@
 				% (host, clusterName, str(e))
 
 	try:
-		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
 		if not ssystem:
 			raise Exception, 'The storage DB entry is missing'
 	except Exception, e:
@@ -1184,83 +1328,78 @@
 
 	# Only add storage systems if the cluster and cluster node DB
 	# objects were added successfully.
-	for i in nodeList:
-		#if 'ricci_host' in i:
-		#	host = str(i['ricci_host'])
-		#else:
-		host = str(i['host'])
+	for i in node_list:
+		host = node_list[i]['host']
 
 		try:
 			# It's already there, as a storage system, no problem.
-			exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			dummy = self.restrictedTraverse(str(STORAGE_FOLDER_PATH + host))
 			continue
 		except:
 			pass
 
 		try:
 			ssystem.manage_addFolder(host, '__luci__:system')
-			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			newSystem = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
 			luci_log.debug_verbose('MC9: %s: %s: %s' % (clusterName, host, str(e)))
 
-def createClusterSystems(self, clusterName, nodeList):
+def createClusterSystems(self, clusterName, node_list):
 	try:
-		clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		clusterObj = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
 		if not clusterObj:
 			raise Exception, 'cluster %s DB entry is missing' % clusterName
 	except Exception, e:
-		nodeUnauth(nodeList)
 		luci_log.debug_verbose('CCS0: %s: %s' % (clusterName, str(e)))
-		return 'No cluster named \"' + clusterName + '\" is managed by Luci'
+		return 'No cluster named \"%s\" is managed by Luci' % clusterName
 
-	for i in nodeList:
-		#if 'ricci_host' in i:
-		#	host = str(i['ricci_host'])
-		#else:
+	for x in node_list:
+		i = node_list[x]
 		host = str(i['host'])
 
 		try:
 			clusterObj.manage_addFolder(host, '__luci__:csystem:' + clusterName)
-			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
+		except Exception, e:
+			luci_log.debug_verbose('CCS0a: %s: %s: %s' % (clusterName, host, str(e)))
+		try:
+			newSystem = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName + '/' + host)
 			if not newSystem:
 				raise Exception, 'cluster node DB entry for %s disappeared from under us' % host
 					
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
-			nodeUnauth(nodeList)
 			luci_log.debug_verbose('CCS1: %s: %s: %s' % (clusterName, host, str(e)))
 			return 'Unable to create cluster node %s for cluster %s: %s' \
 				% (host, clusterName, str(e))
 
 	try:
-		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
 		if not ssystem:
 			raise Exception, 'storage DB entry is missing'
 	except Exception, e:
+		# This shouldn't fail, but if it does, it's harmless right now
 		luci_log.debug_verbose('CCS2: %s: %s' % (clusterName, host, str(e)))
-		return
+		return None
 
 	# Only add storage systems if the and cluster node DB
 	# objects were added successfully.
-	for i in nodeList:
-		#if 'ricci_host' in i:
-		#	host = str(i['ricci_host'])
-		#else:
+	for x in node_list:
+		i = node_list[x]
 		host = str(i['host'])
 
 		try:
 			# It's already there, as a storage system, no problem.
-			exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			dummy = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
 			continue
 		except:
 			pass
 
 		try:
 			ssystem.manage_addFolder(host, '__luci__:system')
-			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			newSystem = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
@@ -1268,7 +1407,7 @@
 
 def delSystem(self, systemName):
 	try:
-		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
 		if not ssystem:
 			raise Exception, 'storage DB entry is missing'
 	except Exception, e:
@@ -1298,7 +1437,7 @@
 			pass
 	else:
 		try:
-			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + cluster_info[0] + '/' + systemName)
+			dummy = self.restrictedTraverse(CLUSTER_FOLDER_PATH + cluster_info[0] + '/' + systemName)
 		except:
 			try:
 				rc.unauth()
@@ -1314,7 +1453,7 @@
 
 def delCluster(self, clusterName):
 	try:
-		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/')
+		clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
 		if not clusters:
 			raise Exception, 'clusters DB entry is missing'
 	except Exception, e:
@@ -1333,7 +1472,7 @@
 
 def delClusterSystem(self, cluster, systemName):
 	try:
-		if not self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + systemName):
+		if not self.restrictedTraverse(STORAGE_FOLDER_PATH + systemName):
 			raise
 	except:
 		# It's not a storage system, so unauthenticate.
@@ -1353,7 +1492,7 @@
 
 def delClusterSystems(self, clusterName):
 	try:
-		cluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		cluster = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
 		if not cluster:
 			raise Exception, 'cluster DB entry is missing'
 
@@ -1510,7 +1649,7 @@
 
 def getClusterNode(self, nodename, clustername):
 	try:
-		cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + str(clustername) + '/' + str(nodename))
+		cluster_node = self.restrictedTraverse(CLUSTER_FOLDER_PATH + str(clustername) + '/' + str(nodename))
 		if not cluster_node:
 			raise Exception, 'cluster node is none'
 		return cluster_node
@@ -1521,7 +1660,7 @@
 
 def getStorageNode(self, nodename):
 	try:
-		storage_node = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + '/' + str(nodename))
+		storage_node = self.restrictedTraverse(STORAGE_FOLDER_PATH + str(nodename))
 		if not storage_node:
 			raise Exception, 'storage node is none'
 		return storage_node
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/12/12 19:03:06	1.51
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/12/21 05:08:49	1.52
@@ -597,6 +597,8 @@
 	# temporary workaround for ricci bug
 	system_info = rc.hostname()
 	try:
+#		FIXME
+#		rc = RicciCommunicator(system_info, enforce_trust=True)
 		rc = RicciCommunicator(system_info)
 		if rc is None:
 			raise Exception, 'unknown error'
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/12/06 22:34:09	1.22
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/12/21 05:08:49	1.23
@@ -83,7 +83,19 @@
         luci_log.debug_verbose('RC:dom0: [auth %d] reported system_name = %s for %s' \
             % (self.__authed, self.__dom0, self.__hostname))
         return self.__dom0
-    
+
+    def fingerprint(self):
+		return self.ss.peer_fingerprint()
+
+    def trust(self):
+        return self.ss.trust()
+
+    def untrust(self):
+        return self.ss.untrust()
+
+    def trusted(self):
+        return self.ss.trusted()
+
     def auth(self, password):
         if self.authed():
             luci_log.debug_verbose('RC:auth0: already authenticated to %s' \
@@ -126,6 +138,10 @@
                 % (ret, self.__hostname))
             if ret != '0':
                 raise Exception, 'Invalid response'
+            try:
+                self.ss.untrust()
+            except:
+                pass
         except:
             errstr = 'Error authenticating to host %s: %s' \
                         % (self.__hostname, str(ret))
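
The hunks above add certificate-trust plumbing: validateAuthenticate now connects with enforce_trust=True, and RicciCommunicator grows fingerprint(), trust(), untrust(), and trusted(). One plausible way a caller might combine them (a sketch, not code taken from the patch):

    from ricci_communicator import RicciCommunicator

    def trust_and_auth(host, password):
        # First contact without enforcing trust, so the peer certificate can
        # be inspected and confirmed; then reconnect with trust enforced.
        rc = RicciCommunicator(host)
        if not rc.trusted():
            fingerprint = rc.fingerprint()  # shown to the user for confirmation
            rc.trust()
        rc = RicciCommunicator(host, enforce_trust=True)
        if not rc.authed():
            rc.auth(password)
        return rc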




* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-11-07 21:33 rmccabe
  0 siblings, 0 replies; 11+ messages in thread
From: rmccabe @ 2006-11-07 21:33 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-07 21:33:52

Modified files:
	luci/cluster   : form-macros index_html resource-form-macros 
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	- fix fenced parameter updates (properties must be strings, not ints)
	- fix more disappearing model builder object problems
	- add a configure action in the bottom left portal for each cluster
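
The fenced fix below comes down to one pattern: the post-join and post-fail delays are validated as integers, but ModelBuilder properties end up as cluster.conf attribute strings. A minimal sketch of that pattern, with the setter names taken from the cluster_adapters.py hunk (the wrapper function itself is illustrative only):

    def apply_fence_daemon_delays(fd, post_join_delay, post_fail_delay):
        # fd is the model's fence daemon object; the delays arrive as ints
        # from form validation, but the model stores conf attributes as
        # strings, so coerce them before setting.
        fd.setPostJoinDelay(str(post_join_delay))
        fd.setPostFailDelay(str(post_fail_delay))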

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.100&r2=1.101
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.26&r2=1.27
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&r1=1.22&r2=1.23
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.152&r2=1.153

--- conga/luci/cluster/form-macros	2006/11/07 20:28:36	1.100
+++ conga/luci/cluster/form-macros	2006/11/07 21:33:52	1.101
@@ -2176,7 +2176,6 @@
 </div>
 
 <div metal:define-macro="xenvmadd-form">
-  <span tal:define="ress python:here.appendModel(request, modelb)"/>
   <form method="get" action="" tal:attributes="action python:request['baseurl'] + '?clustername=' + request['clustername'] + '&pagetype=29'">
   <h4>Path to configuration file: </h4><input type="text" name="xenvmpath" value=""/>
   <h4>Name of configuration file: </h4><input type="text" name="xenvmname" value=""/>
@@ -2185,7 +2184,6 @@
 </div>
 
 <div metal:define-macro="xenvmconfig-form">
-  <span tal:define="ress python:here.appendModel(request, modelb)"/>
   <h4>Properties for Xen VM <font color="green"><span tal:content="request/servicename"/></font></h4>
   <span tal:define="global xeninfo python:here.getXenVMInfo(modelb, request)">
   <form method="get" action="" tal:attributes="action python:request['baseurl'] + '?clustername=' + request['clustername'] + '&pagetype=29&servicename=' + request['servicename']">
--- conga/luci/cluster/index_html	2006/11/07 20:28:36	1.26
+++ conga/luci/cluster/index_html	2006/11/07 21:33:52	1.27
@@ -164,9 +164,17 @@
 		<tal:block tal:condition="python: ri_agent">
 			<tal:block tal:define="
 				global modelb python:here.getmodelbuilder(ri_agent, isVirtualized)" />
+			<tal:block tal:condition="python: modelb">
+				<tal:block
+					tal:define="dummy python: here.appendModel(request, modelb)" />
+			</tal:block>
 		</tal:block>
     </tal:block>
 
+	<tal:block tal:condition="not: exists: modelb">
+		<tal:block tal:define="global modelb nothing" />
+	</tal:block>
+
       <table id="portal-columns">
         <tbody>
           <tr>
--- conga/luci/cluster/resource-form-macros	2006/10/30 20:42:03	1.22
+++ conga/luci/cluster/resource-form-macros	2006/11/07 21:33:52	1.23
@@ -43,8 +43,7 @@
 
 	<tal:block
 		tal:define="
-			global rescInf python: here.getResourcesInfo(modelb, request);
-			global msg python: here.appendModel(request, modelb)" />
+			global rescInf python: here.getResourcesInfo(modelb, request)" />
 
 	<table class="systemsTable">
 		<thead class="systemsTable">
@@ -258,44 +257,43 @@
 	<tal:block tal:define="global resourcename request/resourcename | request/form/resourceName | nothing" />
 	<tal:block tal:condition="resourcename"
 		tal:define="
-			global msg python: here.appendModel(request, modelb);
 			global res python: here.getResourceInfo(modelb, request);
 			global type python: 'tag_name' in res and res['tag_name'] or ''">
 
 	<h2>Configure <span tal:replace="res/name | string: resource" /></h2>
 
 	<div class="reschoose">
-		<span tal:omit-tag="" tal:condition="python: type == 'ip'">
+		<tal:block tal:condition="python: type == 'ip'">
 			<div metal:use-macro="here/resource-form-macros/macros/ip_macro" />
-		</span>
+		</tal:block>
 
-		<span tal:omit-tag="" tal:condition="python: type == 'fs'">
+		<tal:block tal:condition="python: type == 'fs'">
 			<div metal:use-macro="here/resource-form-macros/macros/fs_macro" />
-		</span>
+		</tal:block>
 
-		<span tal:omit-tag="" tal:condition="python: type == 'gfs'">
+		<tal:block tal:condition="python: type == 'gfs'">
 			<div metal:use-macro="here/resource-form-macros/macros/gfs_macro" />
-		</span>
+		</tal:block>
 
-		<span tal:omit-tag="" tal:condition="python: type == 'nfsm'">
+		<tal:block tal:condition="python: type == 'nfsm'">
 			<div metal:use-macro="here/resource-form-macros/macros/nfsm_macro"/>
-		</span>
+		</tal:block>
 
-		<span tal:omit-tag="" tal:condition="python: type == 'nfsx'">
+		<tal:block tal:condition="python: type == 'nfsx'">
 			<div metal:use-macro="here/resource-form-macros/macros/nfsx_macro"/>
-		</span>
+		</tal:block>
 
-		<span tal:omit-tag="" tal:condition="python: type == 'nfsc'">
+		<tal:block tal:condition="python: type == 'nfsc'">
 			<div metal:use-macro="here/resource-form-macros/macros/nfsc_macro"/>
-		</span>
+		</tal:block>
 
-		<span tal:omit-tag="" tal:condition="python: type == 'smb'">
+		<tal:block tal:condition="python: type == 'smb'">
 			<div metal:use-macro="here/resource-form-macros/macros/smb_macro" />
-		</span>
+		</tal:block>
 
-		<span tal:omit-tag="" tal:condition="python: type == 'script'">
+		<tal:block tal:condition="python: type == 'script'">
 			<div metal:use-macro="here/resource-form-macros/macros/scr_macro" />
-		</span>
+		</tal:block>
 	</div>
 	</tal:block>
 </div>
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 20:14:15	1.152
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 21:33:52	1.153
@@ -516,7 +516,6 @@
 	try:
 		model.usesMulticast = True
 		model.mcast_address = addr_str
-		model.setModified(True)
 	except Exception, e:
 		luci_log.debug('Error updating mcast properties: %s' % str(e))
 		errors.append('Unable to update cluster multicast properties')
@@ -720,8 +719,8 @@
 		if post_join_delay == old_pj_delay and post_fail_delay == old_pf_delay:
 			errors.append('No fence daemon properties were changed.')
 		else:
-			fd.setPostJoinDelay(post_join_delay)
-			fd.setPostFailDelay(post_fail_delay)
+			fd.setPostJoinDelay(str(post_join_delay))
+			fd.setPostFailDelay(str(post_fail_delay))
 	except Exception, e:
 		luci_log.debug_verbose('Unable to update fence daemon properties: %s' % str(e))
 		errors.append('An error occurred while attempting to update fence daemon properties.')
@@ -764,7 +763,7 @@
 			return (False, {'errors': ['No cluster model was found.']})
 
 		try:
-			model = getModelBuilder(rc, rc.dom0())
+			model = getModelBuilder(None, rc, rc.dom0())
 			if not model:
 				raise Exception, 'model is none'
 		except Exception, e:
@@ -807,7 +806,8 @@
 		try:
 			config_ver = int(cp.getConfigVersion()) + 1
 			# always increment the configuration version
-			cp.setConfigVersion(config_ver)
+			cp.setConfigVersion(str(config_ver))
+			model.setModified(True)
 			conf_str = model.exportModelAsString()
 			if not conf_str:
 				raise Exception, 'conf_str is none'
@@ -1259,6 +1259,19 @@
   kids.append(rvadd)
   kids.append(rvcfg)
   rv['children'] = kids
+ ################################################################
+
+  cprop = {}
+  cprop['Title'] = 'Configure'
+  cprop['cfg_type'] = 'configuration paramters'
+  cprop['absolute_url'] = url + '?pagetype=' + CLUSTER_CONFIG + '&clustername=' + cluname
+  cprop['Description'] = 'Change cluster configuration parameters'
+  cprop['show_children'] = False
+  if pagetype == CLUSTER_CONFIG:
+    cprop['currentItem'] = True
+  else:
+    cprop['currentItem'] = False
+
  #################################################################
   fd = {}
   fd['Title'] = "Failover Domains"
@@ -1403,6 +1416,7 @@
   mylist.append(nd)
   mylist.append(sv)
   mylist.append(rv)
+  mylist.append(cprop)
   mylist.append(fd)
   mylist.append(fen)
 
@@ -2076,7 +2090,7 @@
       luci_log.debug_verbose('GCI1: unable to find a ricci agent for the %s cluster' % cluname)
       return {}
     try:
-      model = getModelBuilder(rc, rc.dom0())
+      model = getModelBuilder(None, rc, rc.dom0())
       if not model:
         raise Exception, 'model is none'
 
@@ -4350,24 +4364,27 @@
 
 	return True
 
-def getModelBuilder(rc, isVirtualized):
+def getModelBuilder(self, rc, isVirtualized):
 	try:
 		cluster_conf_node = getClusterConf(rc)
 		if not cluster_conf_node:
-			raise
-	except:
-		luci_log.debug('unable to get cluster_conf_node in getModelBuilder')
+			raise Exception, 'getClusterConf returned None'
+	except Exception, e:
+		luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %s' % str(e))
 		return None
 
 	try:
 		modelb = ModelBuilder(0, None, None, cluster_conf_node)
+		if not modelb:
+			raise Exception, 'ModelBuilder returned None'
 	except Exception, e:
 		try:
-			luci_log.debug('An error occurred while trying to get modelb for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
+			luci_log.debug_verbose('GMB1: An error occurred while trying to get modelb for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
 		except:
-			pass
+			luci_log.debug_verbose('GMB1: ModelBuilder failed')
 
-	modelb.setIsVirtualized(isVirtualized)
+	if modelb:
+		modelb.setIsVirtualized(isVirtualized)
 	return modelb
 
 def set_node_flag(self, cluname, agent, batchid, task, desc):




* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-11-03 19:13 rmccabe
  0 siblings, 0 replies; 11+ messages in thread
From: rmccabe @ 2006-11-03 19:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-03 19:13:58

Modified files:
	luci/cluster   : form-macros index_html 
	luci/homebase  : homebase_common.js 
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	add an advanced configuration panel with openais params
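
A rough sketch of how the advanced fields added by this panel could be read back on the server side; the field names (netmtu, token, token_retransmit, join, consensus, ...) come from the template below, but this handler is not part of the patch:

    def read_totem_params(form):
        # Keep only numeric values, and keep them as strings like the other
        # cluster.conf attributes the adapters write.
        params = {}
        for name in ('netmtu', 'token', 'token_retransmit', 'join', 'consensus'):
            value = str(form.get(name, '')).strip()
            if value.isdigit():
                params[name] = value
        return params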

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.92&r2=1.93
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.24&r2=1.25
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/homebase_common.js.diff?cvsroot=cluster&r1=1.13&r2=1.14
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.141&r2=1.142

--- conga/luci/cluster/form-macros	2006/10/31 13:23:09	1.92
+++ conga/luci/cluster/form-macros	2006/11/03 19:13:57	1.93
@@ -84,7 +84,7 @@
 			<a href=""
 				tal:attributes="href cstatus/clucfg | nothing;
 								class python: 'cluster ' + cluster_status;"
-				tal:content="cstatus/clusteralias | string: [unknown]" />
+				tal:content="cstatus/clusteralias | string:[unknown]" />
 		</td>
 
 		<td class="cluster cluster_action">
@@ -397,7 +397,8 @@
 			<input type="hidden" name="pagetype"
 				tal:attributes="value request/pagetype | request/form/pagetype"
 			/>
-			<input type="hidden" name="configtype" value="general" />
+		<input type="hidden" name="configtype" value="general" />
+
 		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
 			<thead class="systemsTable">
 				<tr class="systemsTable"><td class="systemsTable" colspan="1">
@@ -423,8 +424,252 @@
 					</td>
 				</tr>
 			</tbody>
+		</table>
 
-			<tfoot class="systemsTable">
+		<table tal:condition="python: os_version and os_version == 'rhel5'">
+			<tr class="systemsTable">
+				<td class="systemsTable" colspan="2">
+					<img src="/luci/cluster/arrow_right.png" alt="[+]"
+						onclick="toggle_visible(this, 'genprops_advanced', 'genprops_advanced_label')">
+					<span id="genprops_advanced_label">Show</span>
+					advanced cluster properties
+				</td>
+			</tr>
+
+			<tr class="systemsTable invisible" id="genprops_advanced">
+				<td class="systemsTable" colspan="2">
+					<table class="systemsTable">
+						<tr class="systemsTable">
+							<td class="systemsTable">Secure Authentication</td>
+							<td class="systemsTable">
+								<input type="checkbox" name="secauth" checked="checked" />
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Redundant Ring Protocol Mode
+							</td>
+							<td class="systemsTable">
+								<select name="text" name="ais_rrp">
+									<option value="none">
+										None
+									</option>
+									<option value="active">
+										Active
+									</option>
+									<option value="passive">
+										Passive
+									</option>
+								</select>
+							</td>
+						</tr>
+
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Network MTU
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="netmtu"
+									tal:attributes="value string:1500" />
+							</td>
+						</tr>
+
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Number of Threads
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10" name="netmtu"
+									tal:attributes="value string:0" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Virtual Synchrony Type
+							</td>
+							<td class="systemsTable">
+								<select name="vfstype">
+									<option value="none">
+										None
+									</option>
+									<option value="ykd">
+										YKD
+									</option>
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Token Timeout (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10" name="token"
+									tal:attributes="value string:5000" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Token Retransmit (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="token_retransmit"
+									tal:attributes="value string:238" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Hold Token Timeout (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10" name="hold"
+									tal:attributes="value string:180" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Number of retransmits before loss
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="retransmits_before_loss"
+									tal:attributes="value string:4" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Join Timeout (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10" name="join"
+									tal:attributes="value string:100" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Consensus Timeout (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="consensus"
+									tal:attributes="value string:100" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Merge Detection Timeout (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="merge"
+									tal:attributes="value string:200" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Interface Down Check Timeout (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="downcheck"
+									tal:attributes="value string:1000" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Fail to Receive Constant
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="fail_to_recv_const"
+									tal:attributes="value string:50" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Rotations with no multicast traffic before merge detection timeout is started.
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="seqno_unchanged_const"
+									tal:attributes="value string:30" />
+							</td>
+						</tr>
+
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Number of Heartbeat Failures Allowed
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="heartbeat_failures_allowed"
+									tal:attributes="value string:0" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Maximum Network Delay (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="max_network_delay"
+									tal:attributes="value string:50" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Window Size
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="window_size"
+									tal:attributes="value string:50" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								Maximum Messages
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="max_messages"
+									tal:attributes="value string:17" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								RRP Problem Count Timeout (ms)
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="rrp_problem_count_timeout"
+									tal:attributes="value string:1000" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								RRP Problem Count Threshold
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="rrp_problem_count_threshold"
+									tal:attributes="value string:20" />
+							</td>
+						</tr>
+						<tr class="systemsTable">
+							<td class="systemsTable">
+								RRP Token Expired Timeout
+							</td>
+							<td class="systemsTable">
+								<input type="text" size="10"
+									name="rrp_token_expired_timeout"
+									tal:attributes="value string:47" />
+							</td>
+						</tr>
+					</table>
+				</td></tr>
+			</table>
+
+			<table class="systemsTable">
 				<tr class="systemsTable">
 					<td class="systemsTable" colspan="2">
 						<div class="systemsTableEnd">
@@ -433,8 +678,7 @@
 						</div>
 					</td>
 				</tr>
-			</tfoot>
-		</table>
+			</table>
 		</form>
 	</div>
 
@@ -1198,7 +1442,7 @@
 				<td>ESH Path (Optional)</td>
 				<td>
 					<input name="login" type="text"
-						tal:attributes="cur_fencedev/login | string: /opt/pan-mgr/bin/esh" />
+						tal:attributes="cur_fencedev/login | string:/opt/pan-mgr/bin/esh" />
 				</td>
 			</tr>
 		</table>
@@ -1628,8 +1872,8 @@
 						<select class="node" name="gourl">
 							<option value="">Choose a Task...</option>
 							<option tal:attributes="value nd/jl_url">
-								<span tal:condition="python: nd['status'] == '0'" tal:replace="string: Have node leave cluster" />
-								<span tal:condition="python: nd['status'] == '1'" tal:replace="string: Have node join cluster" />
+								<span tal:condition="python: nd['status'] == '0'" tal:replace="string:Have node leave cluster" />
+								<span tal:condition="python: nd['status'] == '1'" tal:replace="string:Have node join cluster" />
 							</option>
 							<option value="">----------</option>
 							<option tal:attributes="value nd/fence_it_url">Fence this node</option>
@@ -1743,7 +1987,7 @@
 				value request/form/clusterName | request/clustername | nothing"
 		/>
 
-		<h2>Add a node to <span tal:replace="request/form/clusterName | request/clustername | string: the cluster" /></h2>
+		<h2>Add a node to <span tal:replace="request/form/clusterName | request/clustername | string:the cluster" /></h2>
 
 		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
 			<thead class="systemsTable">
@@ -1866,7 +2110,7 @@
 							This service is stopped
 						</tal:block>
 					</div>
-					<p>Autostart is <span tal:condition="not: autostart" tal:replace="string: not" /> enabled for this service</p>
+					<p>Autostart is <span tal:condition="not: autostart" tal:replace="string:not" /> enabled for this service</p>
 				</td>
 			</tr>
 
--- conga/luci/cluster/index_html	2006/11/01 20:43:39	1.24
+++ conga/luci/cluster/index_html	2006/11/03 19:13:57	1.25
@@ -39,8 +39,8 @@
       <span tal:define="ri_agent python:here.getRicciAgentForCluster(request)">
 
         <span tal:define="resmap python:here.getClusterOS(ri_agent);
-                          global isVirtualized resmap/isVirtualized;
-                          global os_version resmap/os;"/>
+                          global isVirtualized resmap/isVirtualized | nothing;
+                          global os_version resmap/os | nothing"/>
       </span>
       <span tal:define="global isBusy python:here.isClusterBusy(request)"/>
       <span tal:define="global firsttime request/busyfirst |nothing"/>
--- conga/luci/homebase/homebase_common.js	2006/10/04 17:24:58	1.13
+++ conga/luci/homebase/homebase_common.js	2006/11/03 19:13:57	1.14
@@ -8,6 +8,35 @@
 		ielem.className = ielem.className.replace(/ formerror/, '');
 }
 
+function toggle_visible(img_obj, elem_id, label_id) {
+	var elem = document.getElementById(elem_id)
+	if (!elem)
+		return (-1);
+
+	var old_state = !!!elem.className.match(/invisible/i);
+
+	if (label_id) {
+		var label = document.getElementById(label_id);
+		if (!label)
+			return (-1);
+		if (old_state)
+			label.innerHTML = 'Show';
+		else
+			label.innerHTML = 'Hide';
+	}
+
+	if (old_state) {
+		img_obj.src = 'arrow_right.png';
+		img_obj.alt = '[-]';
+		elem.className += ' invisible';
+	} else {
+		img_obj.src = 'arrow_down.png';
+		img_obj.alt = '[+]';
+		elem.className = elem.className.replace(/invisible/i,'');
+	}
+	return (0);
+}
+
 function is_valid_int(str, min, max) {
 	if (str.match(/[^0-9 -]/))
 		return (0);
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 01:24:56	1.141
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 19:13:57	1.142
@@ -2618,7 +2618,7 @@
       nodename_resolved = resolve_nodename(self, clustername, name)
     except:
       luci_log.debug_verbose('Unable to resolve node name %s/%s' \
-          % (nodename, clustername))
+          % (name, clustername))
       nodename_resolved = name 
 
     map['logurl'] = '/luci/logs?nodename=' + nodename_resolved + '&clustername=' + clustername




* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-10-31 17:28 rmccabe
  0 siblings, 0 replies; 11+ messages in thread
From: rmccabe @ 2006-10-31 17:28 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2006-10-31 17:28:04

Modified files:
	luci/cluster   : form-macros index_html resource-form-macros 
	                 resource_form_handlers.js 
	luci/homebase  : form-macros index_html 
	luci/logs      : index_html 
	luci/site/luci/Extensions: LuciSyslog.py ModelBuilder.py 
	                           cluster_adapters.py 
	                           homebase_adapters.py ricci_bridge.py 
	                           ricci_communicator.py 

Log message:
	fixes (or at least improvements) for bz#s: 212021, 212632, 212006, 212022, 212440, 212991, 212584, 213057
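
Among the changes below, ModelBuilder.exportModelAsString() no longer takes a string buffer and returns a boolean; it returns the serialized configuration, or None when the final consistency check fails. Callers follow the convention visible in the cluster_adapters.py hunk; a compact sketch:

    def export_conf(model):
        # exportModelAsString() now returns the cluster.conf XML string,
        # or None if perform_final_check() fails.
        conf_str = model.exportModelAsString()
        if not conf_str:
            raise Exception, 'unable to export the cluster configuration'
        return conf_str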

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.1&r2=1.90.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20&r2=1.20.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.21&r2=1.21.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource_form_handlers.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20&r2=1.20.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.44&r2=1.44.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.18&r2=1.18.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/logs/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.2&r2=1.1.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciSyslog.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2.2.1&r2=1.2.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.8.2.1&r2=1.8.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.7&r2=1.120.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.34.2.2&r2=1.34.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.30.2.4&r2=1.30.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.1&r2=1.9.2.2

--- conga/luci/cluster/form-macros	2006/10/25 01:53:33	1.90.2.1
+++ conga/luci/cluster/form-macros	2006/10/31 17:28:03	1.90.2.2
@@ -21,10 +21,10 @@
     <span tal:define="global nodereports isBusy/nodereports"/>
     <span tal:repeat="nodereport nodereports">
 		 <tr><td>
-      <span tal:condition="python: nodereport['isnodecreation'] == False">
+      <span tal:condition="python: not 'isnodecreation' in nodereport or nodereport['isnodecreation'] == False">
 			  <h2><span tal:content="nodereport/desc" /></h2>
       </span>
-      <span tal:condition="python: nodereport['isnodecreation'] == True">
+      <span tal:condition="python: 'isnodecreation' in nodereport and nodereport['isnodecreation'] == True">
        <span tal:condition="python: nodereport['iserror'] == True">
 			  <h2><span tal:content="nodereport/desc" /></h2>
          <font color="red"><span tal:content="nodereport/errormessage"/></font>
@@ -2060,6 +2060,7 @@
 		set_page_title('Luci ??? cluster ??? services ??? Configure a service');
 	</script>
 	<tal:block metal:use-macro="here/form-macros/macros/service-config-head-macro" />
+
 	<table class="cluster service" width="100%">
 		<tr class="cluster service info_top">
 			<td class="cluster service service_name">
@@ -2070,6 +2071,8 @@
 			</td>
 			<td class="cluster service service_action">
 				<form method="post" onSubmit="return dropdown(this.gourl)">
+					<input type="hidden" name="pagetype" tal:attributes="
+						value request/pagetype | request/form/pagetype | nothing" />
 					<select name="gourl"
 						tal:define="global innermap sinfo/innermap;
 						starturls innermap/links">
--- conga/luci/cluster/index_html	2006/10/16 20:25:33	1.20
+++ conga/luci/cluster/index_html	2006/10/31 17:28:03	1.20.2.1
@@ -212,24 +212,16 @@
 
 
              <metal:main-form-content use-macro="here/form-chooser/macros/main-form">
-                <h1>Future Site of Forms</h1>
              </metal:main-form-content>
-                  </div>
-
-                </div>
-
-              </metal:block>
-
-		<span tal:omit-tag=""
-			tal:define="global ret python: request.SESSION.get('checkRet')"
-		/>
 
+	<tal:block tal:condition="python: request.SESSION.has_key('checkRet')"
+		tal:define="ret python: request.SESSION.get('checkRet')">
 		<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
 			<div class="hbclosebox">
 				<a href="javascript:hide_element('retmsgsdiv');"><img src="../homebase/x.png"></a>
 			</div>
 			<ul class="retmsgs">
-				<tal:block repeat="e python:ret['messages']">
+				<tal:block tal:repeat="e python:ret['messages']">
 					<li class="retmsgs" tal:content="python:e" />
 				</tal:block>
 			</ul>
@@ -241,11 +233,17 @@
 			</div>
 			<p class="errmsgs">The following errors occurred:</p>
 			<ul class="errmsgs">
-				<tal:block repeat="e python:ret['errors']">
+				<tal:block tal:repeat="e python:ret['errors']">
 					<li class="errmsgs" tal:content="python:e" />
 				</tal:block>
 			</ul>
 		</div>
+	</tal:block>
+                  </div>
+
+                </div>
+
+              </metal:block>
             </td>
             <tal:comment replace="nothing"> End of main content block </tal:comment>
 
--- conga/luci/cluster/resource-form-macros	2006/10/16 04:26:19	1.21
+++ conga/luci/cluster/resource-form-macros	2006/10/31 17:28:03	1.21.2.1
@@ -199,9 +199,8 @@
 		src="/luci/cluster/resource_form_handlers.js">
 	</script>
 
-	<tal:block
-		tal:define="
-			global res python: here.getResourceInfo(modelb, request);" />
+	<tal:block tal:define="
+		global res python: here.getResourceInfo(modelb, request);" />
 
 	<h2>Add a Resource</h2>
 
--- conga/luci/cluster/resource_form_handlers.js	2006/10/07 20:12:47	1.20
+++ conga/luci/cluster/resource_form_handlers.js	2006/10/31 17:28:03	1.20.2.1
@@ -140,7 +140,7 @@
 function validate_filesystem(form) {
 	var errors = new Array();
 
-	if (!form.fsTypeSelect || str_is_blank(form.fsTypeSelect.value)) {
+	if (!form.fstype || str_is_blank(form.fstype.value)) {
 		errors.push('No file system type was given.');
 		set_form_err(form.fsTypeSelect);
 	} else
--- conga/luci/homebase/form-macros	2006/10/16 20:46:46	1.44
+++ conga/luci/homebase/form-macros	2006/10/31 17:28:04	1.44.2.1
@@ -1,7 +1,7 @@
 <html>
 
 <tal:comment tal:replace="nothing">
-	$Id: form-macros,v 1.44 2006/10/16 20:46:46 rmccabe Exp $
+	$Id: form-macros,v 1.44.2.1 2006/10/31 17:28:04 rmccabe Exp $
 </tal:comment>
 
 <head>
@@ -554,8 +554,11 @@
 		set_page_title('Luci ??? homebase ??? Add a running cluster to be managed by Luci');
 	</script>
 
-	<tal:block tal:define="
-		global sessionObj python:request.SESSION.get('checkRet')" />
+	<tal:block tal:condition="python: request.SESSION.has_key('checkRet')"
+		tal:define="global sessionObj python:request.SESSION.get('checkRet')" />
+
+	<tal:block tal:condition="python: not request.SESSION.has_key('checkRet')"
+		tal:define="global sessionObj python:{}" />
 
 	<h2 class="homebase">Add Cluster</h2>
 
--- conga/luci/homebase/index_html	2006/10/09 16:16:11	1.18
+++ conga/luci/homebase/index_html	2006/10/31 17:28:04	1.18.2.1
@@ -15,7 +15,7 @@
 					xml:lang language">
 
 <tal:comment replace="nothing">
-	$Id: index_html,v 1.18 2006/10/09 16:16:11 rmccabe Exp $
+	$Id: index_html,v 1.18.2.1 2006/10/31 17:28:04 rmccabe Exp $
 </tal:comment>
 
 <head metal:use-macro="here/header/macros/html_header">
@@ -133,16 +133,15 @@
 				Homebase
 			</metal:main_form>
 
-		<span tal:omit-tag=""
-			tal:define="global ret python: request.SESSION.get('checkRet')"
-		/>
+	<tal:block tal:condition="python: request.SESSION.has_key('checkRet')"
+		tal:define="ret python: request.SESSION.get('checkRet')">
 
 		<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
 			<div class="hbclosebox">
 				<a href="javascript:hide_element('retmsgsdiv');"><img src="x.png"></a>
 			</div>
 			<ul class="retmsgs">
-				<tal:block repeat="e python:ret['messages']">
+				<tal:block tal:repeat="e python:ret['messages']">
 					<li class="retmsgs" tal:content="python:e" />
 				</tal:block>
 			</ul>
@@ -154,11 +153,12 @@
 			</div>
 			<p class="errmsgs">The following errors occurred:</p>
 			<ul class="errmsgs">
-				<tal:block repeat="e python:ret['errors']">
+				<tal:block tal:repeat="e python:ret['errors']">
 					<li class="errmsgs" tal:content="python:e" />
 				</tal:block>
 			</ul>
 		</div>
+	</tal:block>
 
 
 				  </div>
--- conga/luci/logs/index_html	2006/10/25 16:04:13	1.1.2.2
+++ conga/luci/logs/index_html	2006/10/31 17:28:04	1.1.2.3
@@ -44,18 +44,61 @@
     </metal:javascriptslot>
   </head>
 
-  <script type="text/javascript">
-	function delWaitBox() {
-		var waitbox = document.getElementById('waitbox');
-		if (!waitbox)
-			return (-1);
-		waitbox.parentNode.removeChild(waitbox);
-		return (0);
-	}
-  </script>
 
-  <body onLoad="javascript:delWaitBox()"
-		tal:attributes="class here/getSectionFromURL;
+
+<script language="javascript" type="text/javascript">
+
+var xmlHttp_object = false;
+
+function initiate_async_get(url, funct) {
+  xmlHttp_object = false;
+
+  /*@cc_on @*/
+  /*@if (@_jscript_version >= 5)
+  try {
+    xmlHttp_object = new ActiveXObject("Msxml2.XMLHTTP");
+  } catch (e) {
+    try {
+      xmlHttp_object = new ActiveXObject("Microsoft.XMLHTTP");
+    } catch (e2) {
+      xmlHttp_object = false;
+    }
+  }
+  @end @*/
+
+  if (!xmlHttp_object && typeof XMLHttpRequest != 'undefined') {
+    xmlHttp_object = new XMLHttpRequest();
+  }
+
+  if (xmlHttp_object) {
+    xmlHttp_object.open("GET", url, true);
+    xmlHttp_object.onreadystatechange = funct;
+    xmlHttp_object.send(null);
+  } else {
+    alert("Unable to initiate async GET");
+  }
+}
+
+function replace_loginfo_callback() {
+  if (xmlHttp_object.readyState == 4) {
+    if (xmlHttp_object.status == 200) {
+        var response = xmlHttp_object.responseText;
+        document.getElementById('log_entries').innerHTML = response;
+    } else {
+        alert("Error retrieving data from server");
+    }
+  }
+}
+function replace_loginfo(url) {
+  initiate_async_get(url, replace_loginfo_callback);
+}
+</script>
+
+  <body tal:define="nodename     request/nodename;
+                    log_url      context/logs/log_provider/absolute_url;
+                    log_url_full python:log_url + '?nodename=' + nodename"
+        tal:attributes="onload python:'replace_loginfo(\'' + log_url_full + '\')';
+                        class here/getSectionFromURL;
                         dir python:test(isRTL, 'rtl', 'ltr')">
     <div id="visual-portal-wrapper">
 
@@ -69,16 +112,26 @@
 
       <div class="visualClear"><!-- --></div>
 
-	  <div id="waitbox">
-		<span>
-			Log information for <span tal:replace="request/nodename | string: host"/> is being retrieved...
-		</span>
-	    <img src="/luci/storage/100wait.gif">
-	  </div>
-
       <div id="log_data">
 		<h2>Recent log information for <span tal:replace="request/nodename | string: host"/></h2>
-          <pre tal:content="structure python: here.getLogsForNode(request)" />
+
+		
+  <div id="log_entries">   
+   <table style="width: 100%;">
+    <tr>
+     <td align="center">
+      <img src="../storage/100wait.gif" style="padding-top: 1cm;"/>
+     </td>
+    </tr>
+    <tr>
+     <td align="center">
+      <div style="padding-bottom: 4cm;">Retrieving log info</div>
+     </td>
+    </tr>
+   </table>
+  </div>
+
+
       </div>
 </body>
 </html>
--- conga/luci/site/luci/Extensions/LuciSyslog.py	2006/10/24 16:36:23	1.2.2.1
+++ conga/luci/site/luci/Extensions/LuciSyslog.py	2006/10/31 17:28:04	1.2.2.2
@@ -50,7 +50,7 @@
 		try:
 			syslog(LOG_DEBUG, msg)
 		except:
-			raise LuciSyslogError, 'syslog debug calle failed'
+			raise LuciSyslogError, 'syslog debug call failed'
 
 	def debug(self, msg):
 		if not LUCI_DEBUG_MODE or not self.__init:
@@ -58,7 +58,7 @@
 		try:
 			syslog(LOG_DEBUG, msg)
 		except:
-			raise LuciSyslogError, 'syslog debug calle failed'
+			raise LuciSyslogError, 'syslog debug call failed'
 
 	def close(self):
 		try:
--- conga/luci/site/luci/Extensions/ModelBuilder.py	2006/10/24 01:42:52	1.8.2.1
+++ conga/luci/site/luci/Extensions/ModelBuilder.py	2006/10/31 17:28:04	1.8.2.2
@@ -416,9 +416,9 @@
 
     return True
   
-  def exportModelAsString(self, strbuf):
+  def exportModelAsString(self):
     if self.perform_final_check() == False: # failed
-      return False
+      return None
     
     #check for dual power fences
     self.dual_power_fence_check()
@@ -438,7 +438,7 @@
       #can be used
       self.purgePCDuplicates()
 
-    return True
+    return strbuf
   
   def has_filepath(self):
     if self.filename == None:
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/30 20:43:25	1.120.2.7
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/31 17:28:04	1.120.2.8
@@ -258,7 +258,6 @@
     flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
     flag.manage_addProperty(LAST_STATUS, 0, "int")
 
-
 def validateAddClusterNode(self, request):
 	errors = list()
 	messages = list()
@@ -441,7 +440,7 @@
 			return (False, {'errors': ['An invalid resource type was specified: ' + res_type]})
 
 		try:
-			resObj = resourceAddHandler[res_type](self, dummy_form)
+			resObj = resourceAddHandler[res_type](request, dummy_form)
 		except:
 			luci_log('res type %d is invalid' % res_type)
 			resObj = None
@@ -453,11 +452,32 @@
 	return (True, {'messages': ['This service has been updated.']})
 
 def validateResourceAdd(self, request):
-	return (True, {})
-	
-def validateResourceEdit(self, request):
-	return (True, {})
+	try:
+		res_type = request.form['type'].strip()
+		if not res_type:
+			raise KeyError, 'type is blank'
+	except Exception, e:
+		luci_log.debug_verbose('resourceAdd: type is blank')
+		return (False, {'errors': ['No resource type was given.']})
+
+	errors = list()
+	try:
+		res = resourceAddHandler[res_type](request)
+		if res is None or res[0] is None or res[1] is None:
+			if res and res[2]:
+				errors.extend(res[2])
+			raise Exception, 'An error occurred while adding this resource'
+		modelb = res[1]
+		newres = res[0]
+		addResource(self, request, modelb, newres)
+	except Exception, e:
+		if len(errors) < 1:
+			errors.append('An error occurred while adding this resource')
+		luci_log.debug_verbose('resource error: %s' % str(e))
+		return (False, {'errors': errors})
 
+	return (True, {'messages': ['Resource added successfully']})
+	
 ## Cluster properties form validation routines
 
 def validateMCastConfig(self, form):
@@ -705,7 +725,7 @@
 	21: validateServiceAdd,
 	24: validateServiceAdd,
 	31: validateResourceAdd,
-	33: validateResourceEdit,
+	33: validateResourceAdd,
 	51: validateFenceAdd,
 	50: validateFenceEdit,
 }
@@ -724,9 +744,9 @@
   if request.REQUEST_METHOD == 'POST':
     ret = validatePost(self, request)
     try:
-		request.SESSION.set('checkRet', ret[1])
+      request.SESSION.set('checkRet', ret[1])
     except:
-		request.SESSION.set('checkRet', {})
+      request.SESSION.set('checkRet', {})
   else:
     try: request.SESSION.set('checkRet', {})
     except: pass
@@ -1332,19 +1352,21 @@
 
 def getRicciAgent(self, clustername):
 	#Check cluster permission here! return none if false
-	path = CLUSTER_FOLDER_PATH + clustername
+	path = str(CLUSTER_FOLDER_PATH + clustername)
 
 	try:
 		clusterfolder = self.restrictedTraverse(path)
 		if not clusterfolder:
-			luci_log.debug('cluster folder %s for %s is missing.' \
+			luci_log.debug('GRA: cluster folder %s for %s is missing.' \
 				% (path, clustername))
-			raise
+			raise Exception, 'no cluster folder at %s' % path
 		nodes = clusterfolder.objectItems('Folder')
 		if len(nodes) < 1:
-			luci_log.debug('no cluster nodes for %s found.' % clustername)
-			return None
-	except:
+			luci_log.debug('GRA: no cluster nodes for %s found.' % clustername)
+			raise Exception, 'no cluster nodes were found@%s' % path
+	except Exception, e:
+		luci_log.debug('GRA: cluster folder %s for %s is missing: %s.' \
+			% (path, clustername, str(e)))
 		return None
 
 	cluname = lower(clustername)
@@ -1361,24 +1383,31 @@
 		try:
 			rc = RicciCommunicator(hostname)
 		except RicciError, e:
-			luci_log.debug('ricci error: %s' % str(e))
+			luci_log.debug('GRA: ricci error: %s' % str(e))
 			continue
 
 		try:
 			clu_info = rc.cluster_info()
-			if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
-				luci_log.debug('%s reports it\'s in cluster %s:%s; we expect %s' \
+		except Exception, e:
+			luci_log.debug('GRA: cluster_info error: %s' % str(e))
+
+		if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
+			try:
+				luci_log.debug('GRA: %s reports it\'s in cluster %s:%s; we expect %s' \
 					 % (hostname, clu_info[0], clu_info[1], cluname))
-				# node reports it's in a different cluster
-				raise
-		except:
+				setNodeFlag(self, node, CLUSTER_NODE_NOT_MEMBER)
+			except:
+				pass
 			continue
 
 		if rc.authed():
 			return rc
-		setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+		try:
+			setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+		except:
+			pass
 
-	luci_log.debug('no ricci agent could be found for cluster %s' % cluname)
+	luci_log.debug('GRA: no ricci agent could be found for cluster %s' % cluname)
 	return None
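
The rewritten getRicciAgent() loop amounts to: for each node stored for the cluster, try to open a ricci connection, skip nodes that report membership in a different cluster (flagging them), and return the first agent that is authenticated. A rough, self-contained sketch of that selection logic (FakeRicci is a stub, not the real RicciCommunicator):

    class FakeRicci(object):
        def __init__(self, hostname, cluster, authed):
            self._hostname, self._cluster, self._authed = hostname, cluster, authed
        def cluster_info(self):
            return (self._cluster, self._cluster)
        def authed(self):
            return self._authed
        def hostname(self):
            return self._hostname

    def get_ricci_agent(cluname, nodes):
        for hostname, cluster, authed in nodes:
            try:
                rc = FakeRicci(hostname, cluster, authed)
                info = rc.cluster_info()
            except Exception:
                continue
            if cluname.lower() not in (info[0].lower(), info[1].lower()):
                continue   # node reports it is in a different cluster
            if rc.authed():
                return rc
            # the real code flags this node CLUSTER_NODE_NEED_AUTH here
        return None

    rc = get_ricci_agent('alpha', [('n1', 'beta', True), ('n2', 'alpha', True)])
    print(rc.hostname() if rc else 'no agent found')   # -> n2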
 
 def getRicciAgentForCluster(self, req):
@@ -1395,23 +1424,14 @@
 	return getRicciAgent(self, clustername)
 
 def getClusterStatus(self, rc):
-	clustatus_batch ='<?xml version="1.0" ?><batch><module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module></batch>'
-
-	try:
-		clustatuscmd_xml = minidom.parseString(clustatus_batch).firstChild
-	except:
-		return {}
-
-	try:
-		ricci_xml = rc.process_batch(clustatuscmd_xml, async=False)
-	except RicciError, e:
-		luci_log.debug('ricci error: %s', str(e))
-	except:
+	doc = getClusterStatusBatch(rc)
+	if not doc:
+		try:
+			luci_log.debug_verbose('getClusterStatusBatch returned None for %s/%s' % rc.cluster_info())
+		except:
+			pass
 		return {}
 
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return {}
 	results = list()
 
 	vals = {}
@@ -1617,6 +1637,7 @@
 		try:
 			svcname = req.form['servicename']
 		except:
+			luci_log.debug_verbose('serviceStart error: no service name')
 			return None
 
 	try:
@@ -1625,91 +1646,160 @@
 		try:
 			nodename = req.form['nodename']
 		except:
-			return None
+			nodename = None
+
+	cluname = None
 	try:
 		cluname = req['clustername']
 	except KeyError, e:
 		try:
-			cluname = req.form['clusterName']
+			cluname = req.form['clustername']
 		except:
-			return None
+			pass
+
+	if cluname is None:
+		luci_log.debug_verbose('serviceStart error: %s no cluster name' \
+			% svcname)
+		return None
 
 	ricci_agent = rc.hostname()
 
 	batch_number, result = startService(rc, svcname, nodename)
-	#Now we need to create a DB flag for this system.
+	if batch_number is None or result is None:
+		luci_log.debug_verbose('startService %s call failed' \
+			% svcname)
+		return None
 
-	path = CLUSTER_FOLDER_PATH + cluname
-	clusterfolder = self.restrictedTraverse(path)
+	#Now we need to create a DB flag for this system.
+	path = str(CLUSTER_FOLDER_PATH + cluname)
 	batch_id = str(batch_number)
-	objname = ricci_agent + "____flag"
-	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-	#Now we need to annotate the new DB object
-	objpath = path + "/" + objname
-	flag = self.restrictedTraverse(objpath)
-	#flag[BATCH_ID] = batch_id
-	#flag[TASKTYPE] = SERVICE_START
-	#flag[FLAG_DESC] = "Starting service " + svcname
-	flag.manage_addProperty(BATCH_ID,batch_id, "string")
-	flag.manage_addProperty(TASKTYPE,SERVICE_START, "string")
-	flag.manage_addProperty(FLAG_DESC,"Starting service \'" + svcname + "\'", "string")
+	objname = str(ricci_agent + "____flag")
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = str(path + "/" + objname)
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, SERVICE_START, "string")
+		flag.manage_addProperty(FLAG_DESC, "Starting service \'" + svcname + "\'", "string")
+	except Exception, e:
+		luci_log.debug_verbose('Error creating flag at %s: %s' % (objpath, str(e)))
+
 	response = req.RESPONSE
 	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def serviceRestart(self, rc, req):
-  svcname = req['servicename']
-  batch_number, result = restartService(rc, svcname)
+	try:
+		svcname = req['servicename']
+	except KeyError, e:
+		try:
+			svcname = req.form['servicename']
+		except:
+			luci_log.debug_verbose('no service name for serviceRestart')
+			return None
+	except:
+		luci_log.debug_verbose('no service name for serviceRestart')
+		return None
 
-  ricci_agent = rc.hostname()
-  #Now we need to create a DB flag for this system.
-  cluname = req['clustername']
-
-  path = CLUSTER_FOLDER_PATH + cluname
-  clusterfolder = self.restrictedTraverse(path)
-  batch_id = str(batch_number)
-  objname = ricci_agent + "____flag"
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-  #Now we need to annotate the new DB object
-  objpath = path + "/" + objname
-  flag = self.restrictedTraverse(objpath)
-  #flag[BATCH_ID] = batch_id
-  #flag[TASKTYPE] = SERVICE_RESTART
-  #flag[FLAG_DESC] = "Restarting service " + svcname
-  flag.manage_addProperty(BATCH_ID,batch_id, "string")
-  flag.manage_addProperty(TASKTYPE,SERVICE_RESTART, "string")
-  flag.manage_addProperty(FLAG_DESC,"Restarting service " + svcname, "string")
+	#Now we need to create a DB flag for this system.
+	cluname = None
+	try:
+		cluname = req['clustername']
+	except:
+		try:
+			cluname = req.form['clustername']
+		except:
+			pass
 
-  response = req.RESPONSE
-  response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+	if cluname is None:
+		luci_log.debug_verbose('unable to determine cluster name for serviceRestart %s' % svcname)
+		return None
+
+	batch_number, result = restartService(rc, svcname)
+	if batch_number is None or result is None:
+		luci_log.debug_verbose('restartService for %s failed' % svcname)
+		return None
+				
+	ricci_agent = rc.hostname()
+
+	path = str(CLUSTER_FOLDER_PATH + cluname)
+	batch_id = str(batch_number)
+	objname = str(ricci_agent + "____flag")
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+
+		#Now we need to annotate the new DB object
+		objpath = str(path + "/" + objname)
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, SERVICE_RESTART, "string")
+		flag.manage_addProperty(FLAG_DESC, "Restarting service " + svcname, "string")
+	except Exception, e:
+		luci_log.debug_verbose('Error creating flag in restartService %s: %s' \
+			% (svcname, str(e)))
+
+	response = req.RESPONSE
+	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def serviceStop(self, rc, req):
-  svcname = req['servicename']
-  batch_number, result = stopService(rc, svcname)
+	try:
+		svcname = req['servicename']
+	except KeyError, e:
+		try:
+			svcname = req.form['servicename']
+		except:
+			luci_log.debug_verbose('no service name for serviceStop')
+			return None
+	except:
+		luci_log.debug_verbose('no service name for serviceStop')
+		return None
+
+	#Now we need to create a DB flag for this system.
+	cluname = None
+	try:
+		cluname = req['clustername']
+	except:
+		try:
+			cluname = req.form['clustername']
+		except:
+			pass
 
-  #Now we need to create a DB flag for this system.
-  cluname = req['clustername']
+	if cluname is None:
+		luci_log.debug_verbose('unable to determine cluster name for serviceStop %s' % svcname)
+		return None
 
-  ricci_agent = rc.hostname()
+	batch_number, result = stopService(rc, svcname)
+	if batch_number is None or result is None:
+		luci_log.debug_verbose('stopService for %s failed' % svcname)
+		return None
+
+	ricci_agent = rc.hostname()
 
-  path = CLUSTER_FOLDER_PATH + cluname
-  clusterfolder = self.restrictedTraverse(path)
-  batch_id = str(batch_number)
-  objname = ricci_agent + "____flag"
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-  #Now we need to annotate the new DB object
-  objpath = path + "/" + objname
-  flag = self.restrictedTraverse(objpath)
-  #flag[BATCH_ID] = batch_id
-  #flag[TASKTYPE] = SERVICE_STOP
-  #flag[FLAG_DESC] = "Stopping service " + svcname
-  flag.manage_addProperty(BATCH_ID,batch_id,"string")
-  flag.manage_addProperty(TASKTYPE,SERVICE_STOP, "string")
-  flag.manage_addProperty(FLAG_DESC,"Stopping service " + svcname,"string")
+	path = str(CLUSTER_FOLDER_PATH + cluname)
+	batch_id = str(batch_number)
+	objname = str(ricci_agent + "____flag")
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = str(path + "/" + objname)
+		flag = self.restrictedTraverse(objpath)
 
-  time.sleep(2)
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, SERVICE_STOP, "string")
+		flag.manage_addProperty(FLAG_DESC, "Stopping service " + svcname, "string")
+		time.sleep(2)
+	except Exception, e:
+		luci_log.debug_verbose('Error creating flags for stopService %s: %s' \
+			% (svcname, str(e)))
 
-  response = req.RESPONSE
-  response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+	response = req.RESPONSE
+	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def getFdomsInfo(self, modelb, request, clustatus):
   slist = list()
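
serviceStart/serviceRestart/serviceStop all share the same bookkeeping: after submitting the batch, a ManagedSystem "flag" object named <agent>____flag is created under the cluster folder and annotated with the batch id, task type, and a human-readable description, so isClusterBusy() can poll it later. A dict-based sketch of that pattern (a plain dict stands in for the ZODB folder):

    def create_task_flag(cluster_folder, agent_hostname, batch_number, task, desc):
        objname = agent_hostname + '____flag'     # one flag per agent/task
        cluster_folder[objname] = {
            'batch_id': str(batch_number),        # BATCH_ID property
            'task_type': task,                    # TASKTYPE property
            'flag_desc': desc,                    # FLAG_DESC property
        }
        return objname

    folder = {}
    create_task_flag(folder, 'node1.example.com', 42,
                     'SERVICE_START', "Starting service 'httpd'")
    print(folder)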
@@ -2008,7 +2098,7 @@
 		clustername = request['clustername']
 	except KeyError, e:
 		try:
-			clustername = request.form['clusterName']
+			clustername = request.form['clustername']
 		except:
 			luci_log.debug('missing cluster name for NTP')
 			return None
@@ -2105,16 +2195,20 @@
 			return None
 
 		batch_number, result = nodeLeaveCluster(rc)
-		batch_id = str(batch_number)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeLeaveCluster error: batch_number and/or result is None')
+			return None
 
+		batch_id = str(batch_number)
 		objpath = str(path + "/" + objname)
+
 		try:
 			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
 			#Now we need to annotate the new DB object
 			flag = self.restrictedTraverse(objpath)
 			flag.manage_addProperty(BATCH_ID, batch_id, "string")
-			flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
-			flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+			flag.manage_addProperty(TASKTYPE, NODE_LEAVE_CLUSTER, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' leaving cluster", "string")
 		except:
 			luci_log.debug('An error occurred while setting flag %s' % objpath)
 
@@ -2123,34 +2217,52 @@
 		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 	elif task == NODE_JOIN_CLUSTER:
 		batch_number, result = nodeJoinCluster(rc)
-		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-		nodefolder = self.restrictedTraverse(path)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeJoin error: batch_number and/or result is None')
+			return None
+
+		path = str(CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved)
 		batch_id = str(batch_number)
-		objname = nodename_resolved + "____flag"
-		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = path + "/" + objname
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
-		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
+		objname = str(nodename_resolved + "____flag")
+		objpath = str(path + "/" + objname)
+
+		try:
+			nodefolder = self.restrictedTraverse(path)
+			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_JOIN_CLUSTER, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' joining cluster", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeJoin error: creating flags at %s: %s' \
+				% (path, str(e)))
 
 		response = request.RESPONSE
 		#Once again, is this correct? Should we re-direct to the cluster page?
 		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 	elif task == NODE_REBOOT:
 		batch_number, result = nodeReboot(rc)
-		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-		nodefolder = self.restrictedTraverse(path)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeReboot: batch_number and/or result is None')
+			return None
+
+		path = str(CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved)
 		batch_id = str(batch_number)
-		objname = nodename_resolved + "____flag"
-		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = path + "/" + objname
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batch_id, "string")
-		flag.manage_addProperty(TASKTYPE, NODE_REBOOT, "string")
-		flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being rebooted", "string")
+		objname = str(nodename_resolved + "____flag")
+		objpath = str(path + "/" + objname)
+
+		try:
+			nodefolder = self.restrictedTraverse(path)
+			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_REBOOT, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being rebooted", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeReboot err: creating flags at %s: %s' \
+				% (path, str(e)))
 
 		response = request.RESPONSE
 		#Once again, is this correct? Should we re-direct to the cluster page?
@@ -2161,16 +2273,19 @@
 		try:
 			clusterfolder = self.restrictedTraverse(path)
 			if not clusterfolder:
-				raise
-		except:
-			luci_log.debug('The cluster folder for %s could not be found.' \
-				 % clustername)
+				raise Exception, 'no cluster folder at %s' % path
+		except Exception, e:
+			luci_log.debug('The cluster folder for %s could not be found: %s' \
+				 % (clustername, str(e)))
 			return None
 
 		try:
 			nodes = clusterfolder.objectItems('Folder')
-		except:
-			luci_log.debug('No cluster nodes for %s were found' % clustername)
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes'
+		except Exception, e:
+			luci_log.debug('No cluster nodes for %s were found: %s' \
+				% (clustername, str(e)))
 			return None
 
 		found_one = False
@@ -2210,17 +2325,26 @@
 			return None
 
 		batch_number, result = nodeFence(rc, nodename)
-		path = path + "/" + nodename_resolved
-		nodefolder = self.restrictedTraverse(path)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeFence: batch_number and/or result is None')
+			return None
+
+		path = str(path + "/" + nodename_resolved)
 		batch_id = str(batch_number)
-		objname = nodename_resolved + "____flag"
-		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = path + "/" + objname
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
-		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
+		objname = str(nodename_resolved + "____flag")
+		objpath = str(path + "/" + objname)
+
+		try:
+			nodefolder = self.restrictedTraverse(path)
+			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_FENCE, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being fenced", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeFence err: creating flags at %s: %s' \
+				% (path, str(e)))
 
 		response = request.RESPONSE
 		#Once again, is this correct? Should we re-direct to the cluster page?
@@ -2231,17 +2355,25 @@
 		#and propagate it. We will need two ricci agents for this task.
 
 		# Make sure we can find a second node before we hose anything.
-		path = CLUSTER_FOLDER_PATH + clustername
+		path = str(CLUSTER_FOLDER_PATH + clustername)
 		try:
 			clusterfolder = self.restrictedTraverse(path)
 			if not clusterfolder:
-				raise
-		except:
+				raise Exception, 'no cluster folder at %s' % path
+		except Exception, e:
+			luci_log.debug_verbose('node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
 			return None
 
-		nodes = clusterfolder.objectItems('Folder')
-		found_one = False
+		try:
+			nodes = clusterfolder.objectItems('Folder')
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes in DB'
+		except Exception, e:
+			luci_log.debug_verbose('node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
 
+		found_one = False
 		for node in nodes:
 			if node[1].getId().find(nodename) != (-1):
 				continue
@@ -2250,47 +2382,75 @@
 			# in the cluster we believe it is.
 			try:
 				rc2 = RicciCommunicator(node[1].getId())
-				if not rc2.authed():
-					# set the flag
-					rc2 = None
-				if not rc2:
-					raise
-				found_one = True
-				break
+			except Exception, e:
+				luci_log.info('ricci %s error: %s' % (node[0], str(e)))
+				continue
 			except:
 				continue
 
+			if not rc2.authed():
+				try:
+					setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				try:
+					snode = getStorageNode(self, node[0])
+					setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				luci_log.debug_verbose('%s is not authed' % node[0])
+				rc2 = None
+				continue
+			else:
+				found_one = True
+				break
+
 		if not found_one:
+			luci_log.debug_verbose('unable to find ricci node to delete %s from %s' % (nodename, clustername))
 			return None
 
 		#First, delete cluster.conf from node to be deleted.
 		#next, have node leave cluster.
 		batch_number, result = nodeLeaveCluster(rc, purge=True)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeDelete: batch_number and/or result is None')
+			return None
 
 		#It is not worth flagging this node in DB, as we are going
 		#to delete it anyway. Now, we need to delete node from model
 		#and send out new cluster.conf
 		delete_target = None
-		try:
-			nodelist = model.getNodes()
-			find_node = lower(nodename)
-			for n in nodelist:
+		nodelist = model.getNodes()
+		find_node = lower(nodename)
+		for n in nodelist:
+			try:
 				if lower(n.getName()) == find_node:
 					delete_target = n
 					break
-		except:
-			pass
+			except:
+				continue
 
 		if delete_target is None:
+			luci_log.debug_verbose('unable to find delete target for %s in %s' \
+				% (nodename, clustername))
 			return None
 
 		model.deleteNode(delete_target)
-		str_buf = ""
-		model.exportModelAsString(str_buf)
+
+		try:
+			str_buf = model.exportModelAsString()
+			if not str_buf:
+				raise Exception, 'model string is blank'
+		except Exception, e:
+			luci_log.debug_verbose('NTP exportModelAsString: %s' % str(e))
+			return None
 
 		# propagate the new cluster.conf via the second node
 		batch_number, result = setClusterConf(rc2, str(str_buf))
 		if batch_number is None:
+			luci_log.debug_verbose('batch number is None after del node in NTP')
 			return None
 
 		#Now we need to delete the node from the DB
@@ -2301,19 +2461,24 @@
 			delnode = self.restrictedTraverse(del_path)
 			clusterfolder = self.restrictedTraverse(path)
 			clusterfolder.manage_delObjects(delnode[0])
-		except:
-			# XXX - we need to handle this
-			pass
+		except Exception, e:
+			luci_log.debug_verbose('error deleting %s: %s' % (del_path, str(e)))
 
 		batch_id = str(batch_number)
 		objname = str(nodename_resolved + "____flag")
-		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
 		objpath = str(path + "/" + objname)
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_DELETE, "string")
-		flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
+
+		try:
+			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_DELETE, "string")
+			flag.manage_addProperty(FLAG_DESC, "Deleting node \'" + nodename + "\'", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeDelete %s err setting flag at %s: %s' \
+				% (nodename, objpath, str(e)))
+
 		response = request.RESPONSE
 		response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
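
The node delete branch above first hunts for a second, still-authenticated cluster member so the updated cluster.conf can be propagated after the doomed node has left. A small sketch of that selection step (connect() and the node list are placeholders):

    def pick_propagation_agent(nodes, doomed_node, connect):
        for name in nodes:
            if name == doomed_node:
                continue
            try:
                rc = connect(name)
            except Exception:
                continue
            if rc.get('authed'):
                return rc
            # the real code would flag the node as needing authentication here
        return None

    def fake_connect(name):
        return {'name': name, 'authed': name != 'node2'}

    agent = pick_propagation_agent(['node1', 'node2', 'node3'], 'node1', fake_connect)
    print(agent)   # node1 is being removed, node2 fails auth, so node3 is chosen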
 
@@ -2670,12 +2835,28 @@
     xvm.addAttribute("name", req.form['xenvmname'])
     xvm.addAttribute("path", req.form['xenvmpath'])
 
-  stringbuf = ""
-  model.exportModelAsString(stringbuf)
-  setClusterConf(stringbuf)
+  try:
+    stringbuf = model.exportModelAsString()
+    if not stringbuf:
+      raise Exception, 'model is blank'
+  except Exception, e:
+    luci_log.debug_verbose('exportModelAsString error: %s' % str(e))
+    return None
 
-  
-    
+  try:
+    clustername = model.getClusterName()
+    if not clustername:
+      raise Exception, 'cluster name from modelb.getClusterName() is blank'
+  except Exception, e:
+    luci_log.debug_verbose('error: getClusterName: %s' % str(e))
+    return None
+
+  rc = getRicciAgent(self, clustername)
+  if not rc:
+    luci_log.debug_verbose('Unable to find a ricci agent for the %s cluster' % clustername)
+    return None
+
+  setClusterConf(rc, stringbuf)
 
 def getXenVMInfo(self, model, request):
 	try:
@@ -2717,31 +2898,35 @@
       try:
         cluname = req.form['clusterName']
       except:
-        luci_log.debug_verbose('No cluster name -- returning empty map')
+        luci_log.debug_verbose('ICB0: No cluster name -- returning empty map')
         return map
 
-  path = CLUSTER_FOLDER_PATH + cluname
+  path = str(CLUSTER_FOLDER_PATH + cluname)
   try:
-    clusterfolder = self.restrictedTraverse(str(path))
+    clusterfolder = self.restrictedTraverse(path)
     if not clusterfolder:
       raise Exception, 'clusterfolder is None'
   except Exception, e:
-    luci_log.debug_verbose('cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
+    luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
     return map
   except:
-    luci_log.debug_verbose('cluster %s [%s] folder missing: returning empty map' % (cluname, path))
+    luci_log.debug_verbose('ICB2: cluster %s [%s] folder missing: returning empty map' % (cluname, path))
 
   try:
     items = clusterfolder.objectItems('ManagedSystem')
     if not items or len(items) < 1:
+      luci_log.debug_verbose('ICB3: no flags at %s for cluster %s' \
+          % (cluname, path))
       return map  #This returns an empty map, and should indicate not busy
   except Exception, e:
-    luci_log.debug('An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
+    luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
     return map
   except:
-    luci_log.debug('An error occurred while looking for cluster %s flags at path %s' % (cluname, path))
+    luci_log.debug('ICB5: An error occurred while looking for cluster %s flags@path %s' % (cluname, path))
     return map
-    
+
+  luci_log.debug_verbose('ICB6: isClusterBusy: %s is busy: %d flags' \
+      % (cluname, len(items)))
   map['busy'] = "true"
   #Ok, here is what is going on...if there is an item,
   #we need to call the ricci_bridge and get a batch report.
@@ -2771,31 +2956,58 @@
       batch_xml = None
       ricci = item[0].split("____") #This removes the 'flag' suffix
 
+      luci_log.debug_verbose('ICB6A: using host %s for rc for item %s' \
+          % (ricci[0], item[0]))
       try:
         rc = RicciCommunicator(ricci[0])
+        if not rc:
+          rc = None
+          raise RicciError, 'rc is None for %s' % ricci[0]
       except RicciError, e:
         rc = None
-        luci_log.debug_verbose('ricci returned error in iCB for %s: %s' \
+        luci_log.debug_verbose('ICB7: ricci returned error in iCB for %s: %s' \
           % (cluname, str(e)))
       except:
         rc = None
-        luci_log.info('ricci connection failed for cluster %s' % cluname)
+        luci_log.info('ICB8: ricci connection failed for cluster %s' % cluname)
 
+      batch_id = None
       if rc is not None:
         try:
-          batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
-          if batch_xml != None:
-            (creation_status, total) = batch_status(batch_xml)
-          else:
-            luci_log.debug_verbose('batch report for cluster %s, item %s is None' % (cluname, item[0]))
-        except:
-          creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
-          batch_xml = "bloody_failure" #set to avoid next if statement
-      else:
+          batch_id = item[1].getProperty(BATCH_ID)
+          luci_log.debug_verbose('ICB8A: got batch_id %s from %s' \
+              % (batch_id, item[0]))
+        except Exception, e:
+          try:
+            luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %s' \
+                % (item[0], str(e)))
+          except:
+            luci_log.debug_verbose('ICB8C: failed to get batch_id from %s' % item[0])
+
+        if batch_id is not None:
+          try:
+            batch_xml = rc.batch_report(batch_id)
+            if batch_xml is not None:
+              luci_log.debug_verbose('ICB8D: batch_xml for %s from batch_report is not None -- getting batch status' % batch_id)
+              (creation_status, total) = batch_status(batch_xml)
+              try:
+                luci_log.debug_verbose('ICB8E: batch status returned (%d,%d)' \
+                    % (creation_status, total))
+              except:
+                luci_log.debug_verbose('ICB8F: error logging batch status return')
+            else:
+              luci_log.debug_verbose('ICB9: batch_xml for cluster is None')
+          except Exception, e:
+            luci_log.debug_verbose('ICB9A: error getting batch_xml from rc.batch_report: %s' % str(e))
+            creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
+            batch_xml = "bloody_failure" #set to avoid next if statement
+
+      if rc is None or batch_id is None:
+          luci_log.debug_verbose('ICB12: unable to connect to a ricci agent for cluster %s to get batch status' % cluname)
           creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
-          batch_xml = "bloody_failure" #set to avoid next if statement
+          batch_xml = "bloody_bloody_failure" #set to avoid next if statement
 
-      if batch_xml == None:  #The job is done and gone from queue
+      if batch_xml is None:  #The job is done and gone from queue
         if redirect_message == False: #We have not displayed this message yet
           node_report['desc'] = REDIRECT_MSG
           node_report['iserror'] = True 
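
The polling logic above boils down to: read the stored batch id from each flag, ask that flag's agent for a batch report, and treat a missing report as "job finished, delete the flag". A stripped-down sketch of the loop (batch_report here is a stub callable, not the real RicciCommunicator method):

    def poll_flags(flags, batch_report):
        busy = False
        finished = []
        for objname, flag in list(flags.items()):
            batch_xml = batch_report(flag['batch_id'])
            if batch_xml is None:        # done and gone from the queue
                finished.append(objname)
                continue
            busy = True                  # at least one batch still running
        for objname in finished:
            del flags[objname]
        return busy

    flags = {'node1____flag': {'batch_id': '42'},
             'node2____flag': {'batch_id': '43'}}
    still_running = {'43'}
    print(poll_flags(flags, lambda bid: '<batch/>' if bid in still_running else None))
    print(flags)   # the finished node1 flag has been removed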
@@ -2803,7 +3015,7 @@
           nodereports.append(node_report)
           redirect_message = True
 
-        luci_log.debug_verbose('batch job is done -- deleting %s' % item[0])
+        luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
         clusterfolder.manage_delObjects(item[0])
         continue
 
@@ -2857,7 +3069,7 @@
           try:
               clusterfolder.manage_delObjects(item[0])
           except Exception, e:
-              luci_log.info('Unable to delete %s: %s' % (item[0], str(e)))
+              luci_log.info('ICB14: Unable to delete %s: %s' % (item[0], str(e)))
           continue
         else:
           map['busy'] = "true"
@@ -2917,7 +3129,12 @@
 		map['isVirtualized'] = rc.dom0()
 	except:
 		# default to rhel5 if something crazy happened.
-		luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
+		try:
+			luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
+		except:
+			# this can throw an exception if the original exception
+			# is caused by rc being None or stale.
+			pass
 		map['os'] = 'rhel5'
 		map['isVirtualized'] = False
 	return map
@@ -2949,15 +3166,30 @@
 	return resList
 
 def getResourceInfo(modelb, request):
+	if not modelb:
+		luci_log.debug_verbose('no modelb obj in getResourceInfo')
+		return {}
+
+	name = None
 	try:
 		name = request['resourcename']
 	except KeyError, e:
 		try:
 			name = request.form['resourcename']
 		except:
-			luci_log.debug_verbose('getResourceInfo missing res name')
-			return {}
+			pass
 	except:
+		pass
+
+	if name is None:
+		try:
+			type = request.form['type']
+			if type == 'ip':
+				name = request.form['value'].strip()
+		except:
+			pass
+
+	if name is None:
 		luci_log.debug_verbose('getResourceInfo missing res name')
 		return {}
 
@@ -2998,7 +3230,7 @@
 	try:
 		modelb = request.SESSION.get('model')
 	except:
-		luci_log.debug_verbose('delResource unable to extract model from SESSION')
+		luci_log.debug_verbose('delRes unable to extract model from SESSION')
 		return errstr
 
 	try:
@@ -3007,10 +3239,10 @@
 		try:
 			name = request.form['resourcename']
 		except:
-			luci_log.debug_verbose('delResource missing resname %s' % str(e))
+			luci_log.debug_verbose('delRes missing resname %s' % str(e))
 			return errstr + ': ' + str(e)
 	except:
-		luci_log.debug_verbose('delResource missing resname')
+		luci_log.debug_verbose('delRes missing resname')
 		return errstr + ': ' + str(e)
 
 	try:
@@ -3019,7 +3251,7 @@
 		try:
 			clustername = request.form['clustername']
 		except:
-			luci_log.debug_verbose('delResource missing cluster name')
+			luci_log.debug_verbose('delRes missing cluster name')
 			return errstr + ': could not determine the cluster name.'
 
 	try:
@@ -3040,20 +3272,20 @@
 			break
 
 	if not found:
-		luci_log.debug_verbose('delresource cant find res %s' % name)
+		luci_log.debug_verbose('delRes cant find res %s' % name)
 		return errstr + ': the specified resource was not found.'
 
 	try:
 		conf = modelb.exportModelAsString()
 		if not conf:
-			raise
-	except:
-		luci_log.debug_verbose('exportModelAsString failed')
+			raise Exception, 'model string is blank'
+	except Exception, e:
+		luci_log.debug_verbose('delRes: exportModelAsString failed: %s' % str(e))
 		return errstr
 
-	batch_number, result = setClusterConf(str(conf))
+	batch_number, result = setClusterConf(rc, str(conf))
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('missing batch and/or result from setClusterConf')
+		luci_log.debug_verbose('delRes: missing batch and/or result from setClusterConf')
 		return errstr
 
 	modelstr = ""
@@ -3071,10 +3303,10 @@
 		flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
 		flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
 	except Exception, e:
-		luci_log.debug('An error occurred while setting flag %s: %s' \
+		luci_log.debug('delRes: An error occurred while setting flag %s: %s' \
 			% (objname, str(e)))
 	except:
-		luci_log.debug('An error occurred while setting flag %s' % objname)
+		luci_log.debug('delRes: An error occurred while setting flag %s' % objname)
 
 	response = request.RESPONSE
 	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
@@ -3083,99 +3315,142 @@
 	if form is None:
 		form = request.form
 
+	if not form:
+		luci_log.debug_verbose('addIp error: form is missing')
+		return None
+
 	modelb = request.SESSION.get('model')
-	if not modelb or not form:
+	if not modelb:
+		luci_log.debug_verbose('addIp error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
-		except KeyError, e:
+		except Exception, e:
+			luci_log.debug_verbose('addIp error: %s' % str(e))
 			return None
 	else:
-		res = apply(Ip)
+		try:
+			res = apply(Ip)
+			if not res:
+				raise Exception, 'apply(Ip) is None'
+		except Exception, e:
+			luci_log.debug_verbose('addIp error: %s' % str(e))
+			return None
 
 	if not res:
+		luci_log.debug_verbose('addIp error: res is none')
 		return None
 
+	errors = list()
 	try:
 		addr = form['ip_address'].strip()
 		if not addr:
-			raise KeyError('ip_address is blank')
+			raise KeyError, 'ip_address is blank'
 		# XXX: validate IP addr
 		res.attr_hash['address'] = addr
 	except KeyError, e:
-		return None
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addIp error: %s' % err)
 
 	if 'monitorLink' in form:
 		res.attr_hash['monitor_link'] = '1'
 	else:
 		res.attr_hash['monitor_link'] = '0'
 
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 def addFs(request, form=None):
 	if form is None:
 		form = request.form
-	modelb = request.SESSION.get('model')
 
-	if not modelb or not form:
+	if not form:
+		luci_log.debug_verbose('addFs error: form is missing')
+		return None
+
+	modelb = request.SESSION.get('model')
+	if not modelb:
+		luci_log.debug_verbose('addFs error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
-		except KeyError, e:
+		except Exception, e:
+			luci_log.debug_verbose('addFs error: %s' % str(e))
 			return None
 	else:
-		res = apply(Fs)
+		try:
+			res = apply(Fs)
+			if not res:
+				raise Exception, 'apply(Fs) is None'
+		except Exception, e:
+			luci_log.debug_verbose('addFs error: %s' % str(e))
+			return None
 
 	if not res:
+		luci_log.debug_verbose('addFs error: fs obj was not created')
 		return None
 
 	# XXX: sanity check these fields
+	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		res.attr_hash['name'] = name
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addFs error: %s' % err)
 
 	try:
 		mountpoint = form['mountpoint'].strip()
 		res.attr_hash['mountpoint'] = mountpoint
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addFs error: %s' % err)
 
 	try:
 		device = form['device'].strip()
 		res.attr_hash['device'] = device
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addFs error: %s' % err)
 
 	try:
 		options = form['options'].strip()
 		res.attr_hash['options'] = options
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addFs error: %s' % err)
 
 	try:
 		fstype = form['fstype'].strip()
 		res.attr_hash['fstype'] = fstype
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addFs error: %s' % err)
 
 	try:
 		fsid = form['fsid'].strip()
 		res.attr_hash['fsid'] = fsid
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addFs error: %s' % err)
 
 	if form.has_key('forceunmount'):
 		res.attr_hash['force_unmount'] = '1'
@@ -3192,27 +3467,33 @@
 	else:
 		res.attr_hash['force_fsck'] = '0'
 
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 def addGfs(request, form=None):
 	if form is None:
 		form = request.form
 
+	if not form:
+		luci_log.debug_verbose('addGfs error: form is missing')
+		return None
+
 	modelb = request.SESSION.get('model')
 	if not modelb:
+		luci_log.debug_verbose('addGfs error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
 			if not res:
 				luci_log.debug('resource %s was not found for editing' % oldname)
 				return None
-		except KeyError, e:
+		except Exception, e:
 			luci_log.debug('resource %s was not found for editing: %s' \
 				% (oldname, str(e)))
 			return None
@@ -3220,286 +3501,387 @@
 		try:
 			res = apply(Clusterfs)
 			if not res:
-				raise
+				raise Exception, 'apply(Clusterfs) is None'
+		except Exception, e:
+			luci_log.debug('addGfs error: %s' % str(e))
+			return None
 		except:
-			luci_log.debug('Error creating node Clusterfs resource')
+			luci_log.debug('addGfs error')
 			return None
 
 	# XXX: sanity check these fields
+	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise
+			raise KeyError, 'resourceName is blank'
 		res.attr_hash['name'] = name
-	except:
-		luci_log.debug_verbose('name is missing in clusterfs res')
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addGfs error: %s' % err)
 
 	try:
 		mountpoint = form['mountpoint'].strip()
 		res.attr_hash['mountpoint'] = mountpoint
-	except:
-		luci_log.debug_verbose('mountpoint is missing in clusterfs res')
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addGfs error: %s' % err)
 
 	try:
 		device = form['device'].strip()
 		res.attr_hash['device'] = device
-	except:
-		luci_log.debug_verbose('device is missing in clusterfs res')
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addGfs error: %s' % err)
 
 	try:
 		options = form['options'].strip()
 		res.attr_hash['options'] = options
-	except:
-		luci_log.debug_verbose('options is missing in clusterfs res')
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addGfs error: %s' % err)
 
 	try:
 		fsid = form['fsid'].strip()
 		res.attr_hash['fsid'] = fsid
-	except:
-		luci_log.debug_verbose('fsid is missing in clusterfs res')
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addGfs error: %s' % err)
 
 	if form.has_key('forceunmount'):
 		res.attr_hash['force_unmount'] = '1'
 	else:
 		res.attr_hash['force_unmount'] = '0'
 
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 def addNfsm(request, form=None):
 	if form is None:
 		form = request.form
-	modelb = request.SESSION.get('model')
 
-	if not form or not modelb:
+	if not form:
+		luci_log.debug_verbose('addNfsm error: form is missing')
+		return None
+
+	modelb = request.SESSION.get('model')
+	if not modelb:
+		luci_log.debug_verbose('addNfsm error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
-		except KeyError, e:
+		except Exception, e:
+			luci_log.debug_verbose('addNfsm error: %s' % str(e))
 			return None
 	else:
-		res = apply(Netfs)
+		try:
+			res = apply(Netfs)
+		except Exception, e:
+			luci_log.debug_verbose('addNfsm error: %s' % str(e))
+			return None
 
 	if not res:
 		return None
 
 	# XXX: sanity check these fields
+	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise
+			raise KeyError, 'resourceName is blank'
 		res.attr_hash['name'] = name
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsm error: %s' % err)
 
 	try:
 		mountpoint = form['mountpoint'].strip()
 		res.attr_hash['mountpoint'] = mountpoint
-	except:
-		return None
-
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsm error: %s' % err)
+		
 	try:
 		host = form['host'].strip()
 		res.attr_hash['host'] = host
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsm error: %s' % err)
 
 	try:
 		options = form['options'].strip()
 		res.attr_hash['options'] = options
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsm error: %s' % err)
 
 	try:
 		exportpath = form['exportpath'].strip()
 		res.attr_hash['exportpath'] = exportpath 
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsm error: %s' % err)
 
 	try:
 		nfstype = form['nfstype'].strip().lower()
 		if nfstype != 'nfs' and nfstype != 'nfs4':
-			raise
+			raise KeyError, 'invalid nfs type: %s' % nfstype
 		res.attr_hash['nfstype'] = nfstype
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsm error: %s' % err)
 
 	if form.has_key('forceunmount'):
 		res.attr_hash['force_unmount'] = '1'
 	else:
 		res.attr_hash['force_unmount'] = '0'
 
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 def addNfsc(request, form=None):
 	if form is None:
 		form = request.form
-	modelb = request.SESSION.get('model')
 
-	if not form or not modelb:
+	if not form:
+		luci_log.debug_verbose('addNfsc error: form is missing')
+		return None
+
+	modelb = request.SESSION.get('model')
+	if not modelb:
+		luci_log.debug_verbose('addNfsc error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
-		except KeyError, e:
+		except Exception, e:
+			luci_log.debug_verbose('addNfsc error: %s' % str(e))
 			return None
 	else:
-		res = apply(NFSClient)
+		try:
+			res = apply(NFSClient)
+		except:
+			luci_log.debug_verbose('addNfsc error: %s' % str(e))
+			return None
 
 	if not res:
+		luci_log.debug_verbose('addNfsc error: res is none')
 		return None
 
+	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise
+			raise KeyError, 'resourceName is blank'
 		res.attr_hash['name'] = name
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsc error: %s' % err)
 
 	try:
 		target = form['target'].strip()
 		res.attr_hash['target'] = target 
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsc error: %s' % err)
 
 	try:
 		options = form['options'].strip()
 		res.attr_hash['options'] = options
-	except:
-		return None
-
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsc error: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 def addNfsx(request, form=None):
 	if form is None:
 		form = request.form
-	modelb = request.SESSION.get('model')
 
-	if not modelb or not form:
+	if not form:
+		luci_log.debug_verbose('addNfsx error: form is missing')
+		return None
+
+	modelb = request.SESSION.get('model')
+	if not modelb:
+		luci_log.debug_verbose('addNfsx error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
-		except KeyError, e:
+		except Exception, e:
+			luci_log.debug_verbose('addNfsx error: %s', str(e))
 			return None
 	else:
-		res = apply(NFSExport)
+		try:
+			res = apply(NFSExport)
+		except:
+			luci_log.debug_verbose('addNfsx error: %s', str(e))
+			return None
 
 	if not res:
+		luci_log.debug_verbose('addNfsx error: res is None')
 		return None
 
+	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise
+			raise KeyError, 'resourceName is blank'
 		res.attr_hash['name'] = name
-	except:
-		return None
-
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addNfsx error: %s', err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 def addScr(request, form=None):
 	if form is None:
 		form = request.form
-	modelb = request.SESSION.get('model')
-	form = request.form
 
-	if not modelb or not form:
+	if not form:
+		luci_log.debug_verbose('addScr error: form is missing')
+		return None
+
+	modelb = request.SESSION.get('model')
+	if not modelb:
+		luci_log.debug_verbose('addScr error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
-		except KeyError, e:
+		except Exception, e:
+			luci_log.debug_verbose('addScr error: %s' % str(e))
 			return None
 	else:
-		res = apply(Script)
+		try:
+			res = apply(Script)
+		except Exception, e:
+			luci_log.debug_verbose('addScr error: %s' % str(e))
+			return None
 
 	if not res:
+		luci_log.debug_verbose('addScr error: res is None')
 		return None
 
+	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise
+			raise KeyError, 'resourceName is blank'
 		res.attr_hash['name'] = name
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addScr error: %s' % err)
 
 	try:
 		file = form['file'].strip()
 		if not file:
-			raise
+			raise KeyError, 'file path is blank'
 		res.attr_hash['file'] = file
-	except:
-		return None
-
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addScr error: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 def addSmb(request, form=None):
 	if form is None:
 		form = request.form
-	modelb = request.SESSION.get('model')
 
-	if not modelb or not form:
+	if not form:
+		luci_log.debug_verbose('addSmb error: form is missing')
+		return None
+
+	modelb = request.SESSION.get('model')
+	if not modelb:
+		luci_log.debug_verbose('addSmb error: modelb is missing')
 		return None
 
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError('oldname is blank.')
+				raise KeyError, 'oldname is blank.'
 			res = getResourceForEdit(modelb, oldname)
-		except KeyError, e:
+		except Exception, e:
+			luci_log.debug_verbose('addSmb error: %s' % str(e))
 			return None
 	else:
-		res = apply(Samba)
+		try:
+			res = apply(Samba)
+		except Exception, e:
+			luci_log.debug_verbose('addSmb error: %s' % str(e))
+			return None
 
 	if not res:
+		luci_log.debug_verbose('addSmb error: res is None')
 		return None
 
+	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise
+			raise KeyError, 'resourceName is blank'
 		res.attr_hash['name'] = name
-	except:
-		return None
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addSmb error: %s' % err)
 
 	try:
 		workgroup = form['workgroup'].strip()
 		res.attr_hash['workgroup'] = workgroup
-	except:
-		return None
-
-	modelb.getResourcesPtr().addChild(res)
-	return res
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addSmb error: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, modelb, None]
 
 resourceAddHandler = {
 	'ip': addIp,
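
With this change every add* handler returns a [resource, model, errors] triple instead of attaching the resource itself; validateResourceAdd() dispatches on the form's 'type' field and calls addResource() only when the triple is complete. A compact sketch of that convention (the handler and model here are toys that take their inputs explicitly rather than reading the Zope session):

    def add_script(form, model):
        errors = []
        name = form.get('resourceName', '').strip()
        if not name:
            errors.append('resourceName is blank')
        if errors:
            return [None, None, errors]
        return [{'type': 'script', 'name': name}, model, None]

    handlers = {'script': add_script}

    def validate_resource_add(form, model):
        res_type = form.get('type', '').strip()
        if res_type not in handlers:
            return (False, {'errors': ['No resource type was given.']})
        res, model, errors = handlers[res_type](form, model)
        if res is None:
            return (False, {'errors': errors})
        model.setdefault('resources', []).append(res)   # the addResource() step
        return (True, {'messages': ['Resource added successfully']})

    print(validate_resource_add({'type': 'script', 'resourceName': 'httpd'}, {}))
    print(validate_resource_add({'type': 'script', 'resourceName': ''}, {}))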
@@ -3582,48 +3964,37 @@
 	
 	return messages
 
-def addResource(self, rc, request):
-	if not request.form:
-		return (False, {'errors': ['No form was submitted.']})
+def addResource(self, request, modelb, res):
+	clustername = modelb.getClusterName()
+	if not clustername:
+		raise Exception, 'cluster name from modelb.getClusterName() is blank'
+
+	rc = getRicciAgent(self, clustername)
+	if not rc:
+		raise Exception, 'Unable to find a ricci agent for the %s cluster' % clustername
 
-	try:
-		type = request.form['type'].strip()
-		if not type or not type in resourceAddHandler:
-			raise
-	except:
-		return (False, {'errors': ['Form type is missing.']})
-
-	try:
-		resname = request.form['resourceName']
-	except KeyError, e:
-		# For IP, the IP address itself is the name.
-		if request.form['type'] != 'ip':
-			return (False, {'errors': ['No resource name was given.']})
+	modelb.getResourcesPtr().addChild(res)
 
 	try:
-		clustername = request['clustername']
-	except KeyError, e:
-		try:
-			clustername = request.form['clustername']
-		except:
-			return 'unable to determine the current cluster\'s name'
-
-	res = resourceAddHandler[type](request)
-	modelb = request.SESSION.get('model')
-	modelstr = ""
-	conf = modelb.exportModelAsString()
+		conf = modelb.exportModelAsString()
+		if not conf:
+			raise Exception, 'model string for %s is blank' % clustername
+	except Exception, e:
+		luci_log.debug_verbose('addResource: exportModelAsString err: %s' % str(e))
+		return 'An error occurred while adding this resource'
 
 	try:
 		ragent = rc.hostname()
 		if not ragent:
-			luci_log.debug('missing hostname')
-			raise
-		batch_number, result = setClusterConf(str(conf))
+			luci_log.debug_verbose('missing hostname')
+			raise Exception, 'unknown ricci agent hostname'
+		luci_log.debug_verbose('SENDING NEW CLUSTER CONF: %s' % conf)
+		batch_number, result = setClusterConf(rc, str(conf))
 		if batch_number is None or result is None:
-			luci_log.debug('missing batch_number or result')
-			raise
-	except:
-		return "Some error occured in setClusterConf\n"
+			luci_log.debug_verbose('missing batch_number or result')
+			raise Exception, 'batch_number or results is None from setClusterConf'
+	except Exception, e:
+		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
 
 	path = str(CLUSTER_FOLDER_PATH + clustername)
 	clusterfolder = self.restrictedTraverse(path)
@@ -3639,7 +4010,7 @@
 		flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
 
 		if type != 'ip':
-			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
+			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['name'] + "\'", "string")
 		else:
 			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
 	except Exception, e:
@@ -3668,7 +4039,7 @@
 		request.SESSION.set('model', model)
 	except:
 		luci_log.debug_verbose('Appending model to request failed')
-		return False
+		return 'An error occurred while storing the cluster model.' 
 
 def resolve_nodename(self, clustername, nodename):
 	path = str(CLUSTER_FOLDER_PATH + clustername)
@@ -3733,7 +4104,7 @@
 	try:
 		cluster_conf_node = getClusterConf(rc)
 		if not cluster_conf_node:
-			raise;
+			raise
 	except:
 		luci_log.debug('unable to get cluster_conf_node in getModelBuilder')
 		return None
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/30 20:20:04	1.34.2.2
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/31 17:28:04	1.34.2.3
@@ -14,9 +14,6 @@
 from clusterOS import resolveOSType
 from conga_constants import *
 
-class InCluster(Exception):
-	pass
-
 def siteIsSetup(self):
 	try:
 		if os.path.isfile(CERTS_DIR_PATH + 'privkey.pem') and os.path.isfile(CERTS_DIR_PATH + 'cacert.pem'):
@@ -661,23 +658,20 @@
 	except:
 		sessionData = None
 
+	try:
+		request.SESSION.delete('checkRet')
+	except:
+		pass
+
 	if 'ACTUAL_URL' in request:
 		url = request['ACTUAL_URL']
 	else:
 		url = '.'
 
-	if 'pagetype' in request.form:
-		pagetype = int(request.form['pagetype'])
-	else:
-		try: request.SESSION.set('checkRet', {})
-		except: pass
-		return homebasePortal(self, request, '.', '0')
-
 	try:
+		pagetype = int(request.form['pagetype'])
 		validatorFn = formValidators[pagetype - 1]
 	except:
-		try: request.SESSION.set('checkRet', {})
-		except: pass
 		return homebasePortal(self, request, '.', '0')
 
 	if validatorFn == validateAddClusterInitial or validatorFn == validateAddCluster:
@@ -705,7 +699,7 @@
 		return homebaseControlPost(self, request)
 
 	try:
-		request.SESSION.set('checkRet', {})
+		request.SESSION.delete('checkRet')
 	except:
 		pass
 
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/25 16:00:40	1.30.2.4
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/31 17:28:04	1.30.2.5
@@ -2,6 +2,12 @@
 from time import time, ctime
 from xml.dom import minidom
 from ricci_communicator import RicciCommunicator
+from LuciSyslog import LuciSyslog
+
+try:
+	luci_log = LuciSyslog()
+except:
+	pass
 
 def checkBatch(rc, batch_id):
 	try:
@@ -200,20 +206,29 @@
 
 	return minidom.parseString(batch).firstChild
 
-def batchAttemptResult(self, doc):
-	docc = None
-	rc_node = None
+def batchAttemptResult(doc):
+	try:
+		batch = doc.getElementsByTagName('batch')
+		if not batch or len(batch) < 1:
+			raise Exception, 'no batch tag was found'
+	except Exception, e:
+		luci_log.debug_verbose('batchAttemptResult: %s' % str(e))
 
-	for node in doc.firstChild.childNodes:
-		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-			if node.nodeName == 'batch':
-				#get batch number and status code
-				batch_number = node.getAttribute('batch_id')
-				result = node.getAttribute('status')
-				return (batch_number, result)
-			else:
-				#print "RETURNING NONE!!!"
-				return (None, None)
+	for i in batch:
+		try:
+			batch_number = i.getAttribute('batch_id')
+			result = i.getAttribute('status')
+			return (str(batch_number), str(result))
+		except Exception, e:
+			luci_log.debug_verbose('batchAttemptResult: %s' % str(e))
+
+	try:
+		luci_log.debug_verbose('no batch with batchid and status found in \"%s\"' % doc.toxml())
+	except:
+		pass
+
+	return (None, None)
+	
 
 def getPayload(bt_node):
 	if not bt_node:
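
The new batchAttemptResult() looks the <batch> element up directly with getElementsByTagName() and pulls batch_id/status from its attributes, returning (None, None) when no such element exists. A self-contained sketch of that parsing:

    from xml.dom import minidom

    def batch_attempt_result(doc):
        for node in doc.getElementsByTagName('batch'):
            return (str(node.getAttribute('batch_id')),
                    str(node.getAttribute('status')))
        return (None, None)

    reply = minidom.parseString('<result><batch batch_id="42" status="0"/></result>')
    print(batch_attempt_result(reply))                            # ('42', '0')
    print(batch_attempt_result(minidom.parseString('<result/>'))) # (None, None)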
@@ -260,6 +275,20 @@
 	doc.appendChild(cl_node)
 	return doc
 
+def getClusterStatusBatch(rc):
+	batch_str ='<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
+	ricci_xml = rc.batch_run(batch_str, async=False)
+
+	if not ricci_xml or not ricci_xml.firstChild:
+		luci_log.debug_verbose('ricci_xml is None from batch_run')
+		
+	doc = getPayload(ricci_xml.firstChild)
+	if not doc or not doc.firstChild:
+		luci_log.debug_verbose('doc is None from getPayload: %s' % ricci_xml.toxml())
+		return None
+
+	return doc
+
 def setClusterConf(rc, clusterconf, propagate=True):
 	if propagate == True:
 		propg = 'true'
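
The new getClusterStatusBatch() and the remaining hunks in this file all share one shape: build the <module>/<request> batch string, run it through the agent with batch_run(), and hand the raw reply straight to batchAttemptResult(). A sketch of that shared helper shape, with a stub agent that echoes a canned reply:

    from xml.dom import minidom

    def batch_attempt_result(doc):
        for node in doc.getElementsByTagName('batch'):
            return (node.getAttribute('batch_id'), node.getAttribute('status'))
        return (None, None)

    class StubRicci(object):
        def batch_run(self, batch_str):
            # a real agent would queue the batch; this stub echoes a result
            return minidom.parseString(
                '<result><batch batch_id="7" status="0"/></result>')

    def stop_service(rc, servicename):
        batch_str = ('<module name="cluster"><request API_version="1.0">'
                     '<function_call name="stop_service">'
                     '<var mutable="false" name="servicename" type="string" value="'
                     + servicename + '"/></function_call></request></module>')
        ricci_xml = rc.batch_run(batch_str)
        return batch_attempt_result(ricci_xml)

    print(stop_service(StubRicci(), 'httpd'))   # ('7', '0')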
@@ -274,10 +303,7 @@
 	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"><var type="boolean" name="propagate" mutable="false" value="' + propg + '"/><var type="xml" mutable="false" name="cluster.conf">' + conf + '</var></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def getNodeLogs(rc):
 	errstr = 'log not accessible'
@@ -334,10 +360,7 @@
 	batch_str = '<module name="reboot"><request sequence="111" API_version="1.0"><function_call name="reboot_now"/></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def nodeLeaveCluster(rc, cluster_shutdown=False, purge=False):
 	cshutdown = 'false'
@@ -351,19 +374,13 @@
 	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="' + cshutdown + '"/><var mutable="false" name="purge_conf" type="boolean" value="' + purge_conf + '"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def nodeFence(rc, nodename):
 	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="' + nodename + '"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def nodeJoinCluster(rc, cluster_startup=False):
 	cstartup = 'false'
@@ -373,10 +390,7 @@
 	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="' + cstartup + '"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def startService(rc, servicename, preferrednode=None):
 	if preferrednode != None:
@@ -385,28 +399,19 @@
 		batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def restartService(rc, servicename):
 	batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def stopService(rc, servicename):
 	batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
-		return (None, None)
-	return batchAttemptResult(doc)
+	return batchAttemptResult(ricci_xml)
 
 def getDaemonStates(rc, dlist):
 	batch_str = '<module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">'
@@ -417,9 +422,10 @@
 	batch_str += '</var></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str, async=False)
-	if not ricci_xml:
+	if not ricci_xml or not ricci_xml.firstChild:
+		luci_log.debug_verbose('no ricci_xml in getDaemonStates')
 		return None
-	result = extractDaemonInfo(ricci_xml)
+	result = extractDaemonInfo(ricci_xml.firstChild)
 	return result
 
 def extractDaemonInfo(bt_node):
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/10/24 16:36:23	1.9.2.1
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/10/31 17:28:04	1.9.2.2
@@ -216,10 +216,12 @@
             luci_log.debug('An error occurred while trying to process the batch job: %s' % batch_xml_str)
             return None
 
-        return ricci_xml
+        doc = minidom.Document()
+        doc.appendChild(ricci_xml)
+        return doc
 
     def batch_report(self, batch_id):
-        luci_log.debug_verbose('[auth=%d] asking for batchid# %d for host %s' \
+        luci_log.debug_verbose('[auth=%d] asking for batchid# %s for host %s' \
             % (self.__authed, batch_id, self.__hostname))
 
         if not self.authed():
@@ -242,7 +244,7 @@
         if doc.firstChild.getAttribute('success') == '12':
             return None
         if doc.firstChild.getAttribute('success') != '0':
-            raise RicciError, 'Error while retrieving batch report for batch #%s from host %s' % (batch_id, self.__hostname)
+            raise RicciError, 'Error while retrieving batch report for batch #%d from host %s' % (batch_id, self.__hostname)
         batch_node = None
         for node in doc.firstChild.childNodes:
             if node.nodeType == xml.dom.Node.ELEMENT_NODE:
@@ -401,10 +403,10 @@
                     last = last + 1
                     last = last - 2 * last
     try:
-        luci_log.debug_verbose('Returning (%s, %s) for batch_status(\"%s\")' \
+        luci_log.debug_verbose('Returning (%d, %d) for batch_status(\"%s\")' \
             % (last, total, batch_xml.toxml()))
     except:
-        pass
+        luci_log.debug_verbose('Returning last, total')
 
     return (last, total)
 



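As a side note on the refactor above: every module wrapper in ricci_bridge.py now hands the document returned by rc.batch_run() straight to batchAttemptResult(), which locates the <batch> element itself and reads its batch_id and status attributes. A minimal, self-contained sketch of that parsing step follows; the sample response string is an assumption modeled on the attributes used in the diff, not captured ricci output.

from xml.dom import minidom

# Stand-in for a ricci batch response; real responses come back from
# RicciCommunicator.batch_run() and carry the same two attributes.
SAMPLE_RESPONSE = '<?xml version="1.0" ?><ricci><batch batch_id="7" status="0"/></ricci>'

def batch_id_and_status(xml_str):
    # Mirrors the reworked batchAttemptResult(): find the <batch> tag and
    # return its (batch_id, status) pair, or (None, None) if it is missing.
    doc = minidom.parseString(xml_str)
    batch_tags = doc.getElementsByTagName('batch')
    if not batch_tags:
        return (None, None)
    node = batch_tags[0]
    return (str(node.getAttribute('batch_id')), str(node.getAttribute('status')))

print(batch_id_and_status(SAMPLE_RESPONSE))  # ('7', '0')
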
^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-10-16  4:26 rmccabe
  0 siblings, 0 replies; 11+ messages in thread
From: rmccabe @ 2006-10-16  4:26 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-16 04:26:19

Modified files:
	luci/cluster   : form-macros index_html resource-form-macros 
	luci/site/luci/Extensions: ricci_communicator.py 
	                           homebase_adapters.py 
	                           conga_constants.py ricci_bridge.py 
	                           cluster_adapters.py Variable.py 
	                           PropsObject.py 

Log message:
	all sorts of fixes and cleanups..

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.84&r2=1.85
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.18&r2=1.19
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&r1=1.20&r2=1.21
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.7&r2=1.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.31&r2=1.32
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.16&r2=1.17
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.27&r2=1.28
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.110&r2=1.111
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/Variable.py.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/PropsObject.py.diff?cvsroot=cluster&r1=1.1&r2=1.2

--- conga/luci/cluster/form-macros	2006/10/13 21:25:14	1.84
+++ conga/luci/cluster/form-macros	2006/10/16 04:26:19	1.85
@@ -58,21 +58,24 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? cluster list');
 	</script>
+
 <div id="cluster_list">
 <div class="cluster" tal:repeat="clu clusystems">
-	<tal:block tal:define="global ragent python: here.getRicciAgent(clu)" />
 
- <span tal:condition="python: ragent == ''">
-    <strong class="errmsgs">An error occurred when trying to contact any of the nodes in the <span tal:replace="python: clu[0]"/> cluster.</strong>
-  <hr/>
-  </span>
+	<tal:block tal:define="
+		global ragent python: here.getRicciAgent(clu[0])" />
 
-  <span tal:condition="python: ragent != ''">
-	<tal:block
-			tal:define="global stat python: here.getClusterStatus(ragent);
-			global cstatus python: here.getClustersInfo(stat,request);
+	<div tal:condition="python: not ragent">
+		<strong class="errmsgs">An error occurred when trying to contact any of the nodes in the <span tal:replace="python: clu[0]"/> cluster.</strong>
+		<hr/>
+	</div>
+
+	<tal:block tal:condition="python: ragent">
+		<tal:block tal:define="
+			global stat python: here.getClusterStatus(ragent);
+			global cstatus python: here.getClustersInfo(stat, request);
 			global cluster_status python: 'cluster ' + (('running' in cstatus and cstatus['running'] == 'true') and 'running' or 'stopped');"
- 	/>
+	 	/>
 
 	<table class="cluster" width="100%">
 	<tr class="cluster info_top">
@@ -151,7 +154,7 @@
 	</tr>
 	</table>
 	<hr>
- </span>
+ </tal:block>
 </div>
 </div>
 </div>
@@ -348,10 +351,8 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? Configure cluster properties');
 	</script>
-	<tal:comment tal:replace="nothing">
-		<span tal:define="global ricci_agent python: here.getRicciAgentForCluster(request)"/>
-	</tal:comment>
 
+	<span tal:define="global ricci_agent python: here.getRicciAgentForCluster(request)" />
 	<tal:block
 		tal:define="global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
@@ -1129,9 +1130,9 @@
 		global ricci_agent python: here.getRicciAgentForCluster(request);
 		global nodestatus python: here.getClusterStatus(ricci_agent);
 		global nodeinfo python: here.getNodeInfo(modelb, nodestatus, request);
-		global fenceinfo python: here.getFenceInfo(modelb, request);
 		global status_class python: 'node_' + (nodeinfo['nodestate'] == '0' and 'active' or (nodeinfo['nodestate'] == '1' and 'inactive' or 'unknown'));
-		global cluster_node_status_str python: (nodeinfo['nodestate'] == '0' and 'Cluster member' or (nodeinfo['nodestate'] == '1' and 'Currently not a cluster participant' or 'This node is not responding'));"
+		global cluster_node_status_str python: (nodeinfo['nodestate'] == '0' and 'Cluster member' or (nodeinfo['nodestate'] == '1' and 'Currently not a cluster participant' or 'This node is not responding'));
+		global fenceinfo python: here.getFenceInfo(modelb, request)"
 	/>
 
 	 <table class="cluster node" width="100%">
@@ -1317,10 +1318,11 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? nodes');
 	</script>
+
 <div id="node_list" tal:define="
 	global ricci_agent python: here.getRicciAgentForCluster(request);
 	global status python: here.getClusterStatus(ricci_agent);
-	global nds python: here.getNodesInfo(modelb,status,request)">
+	global nds python: here.getNodesInfo(modelb, status, request)">
 
 	<div tal:repeat="nd nds">
 		<tal:block
@@ -1532,6 +1534,7 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? services');
 	</script>
+
 	<tal:block tal:omit-tag=""
 		tal:define="
 			global ricci_agent python: here.getRicciAgentForCluster(request);
@@ -1657,8 +1660,11 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? services ??? Start a service');
 	</script>
-	<span tal:define="global ricci_agent python: here.getRicciAgentForCluster(request)"/>
-	<span tal:define="result python: here.serviceStart(ricci_agent, request)"/>
+
+	<tal:block tal:define="
+		global ricci_agent python: here.getRicciAgentForCluster(request);
+		result python: here.serviceStart(ricci_agent, request)" />
+
 	<!-- <span metal:use-macro="here/form-macros/macros/serviceconfig-form"/> -->
 </div>
 
@@ -1667,8 +1673,11 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? services ??? Restart a service');
 	</script>
-	<span tal:define="global ricci_agent python: here.getRicciAgentForCluster(request)"/>
-	<span tal:define="result python: here.serviceRestart(ricci_agent, request)"/>
+
+	<tal:block tal:define="
+		global ricci_agent python: here.getRicciAgentForCluster(request);
+		result python: here.serviceRestart(ricci_agent, request)" />
+
 	<!-- <span metal:use-macro="here/form-macros/macros/serviceconfig-form"/> -->
 </div>
 
@@ -1676,8 +1685,11 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? services ??? Stop a service');
 	</script>
-	<span tal:define="global ricci_agent python: here.getRicciAgentForCluster(request)"/>
-	<span tal:define="result python: here.serviceStop(ricci_agent,request)"/>
+
+	<span tal:define="
+		global ricci_agent python: here.getRicciAgentForCluster(request);
+		result python: here.serviceStop(ricci_agent, request)" />
+
 	<!-- <span metal:use-macro="here/form-macros/macros/serviceconfig-form"/> -->
 </div>
 
@@ -1720,7 +1732,7 @@
 		global ricci_agent python: here.getRicciAgentForCluster(request);
 		global global_resources python: here.getResourcesInfo(modelb, request);
 		global sstat python: here.getClusterStatus(ricci_agent);
-		global sinfo python: here.getServiceInfo(sstat, modelb,request);
+		global sinfo python: here.getServiceInfo(sstat, modelb, request);
 		global running sinfo/running | nothing;" />
 
 	<tal:block tal:replace="structure python: '<script type='+chr(0x22)+'text/javascript'+chr(0x22)+'>'" />
@@ -1880,11 +1892,11 @@
 	<script type="text/javascript">
 		set_page_title('Luci ??? cluster ??? failover domains');
 	</script>
-	<tal:block
-		tal:define="
-			global ragent python: here.getRicciAgentForCluster(request);
-			global sta python: here.getClusterStatus(ragent);
-			global fdominfo python: here.getFdomsInfo(modelb, request, sta);" />
+
+	<tal:block tal:define="
+		global ragent python: here.getRicciAgentForCluster(request);
+		global sta python: here.getClusterStatus(ragent);
+		global fdominfo python: here.getFdomsInfo(modelb, request, sta);" />
 
 	<div class="cluster fdom" tal:repeat="fdom fdominfo">
 	<div class="cluster fdom fdomname">
--- conga/luci/cluster/index_html	2006/10/09 16:16:11	1.18
+++ conga/luci/cluster/index_html	2006/10/16 04:26:19	1.19
@@ -34,9 +34,11 @@
     <span tal:condition="not: hascluster">
     <meta googaa="ooo"/>
     </span>
+
     <span tal:condition="hascluster">
       <span tal:define="ri_agent python:here.getRicciAgentForCluster(request)">
-        <span tal:define="resmap python:here.getClusterOS(ri_agent,request);
+
+        <span tal:define="resmap python:here.getClusterOS(ri_agent);
                           global isVirtualized resmap/isVirtualized;
                           global os_version resmap/os;"/>
       </span>
@@ -52,6 +54,7 @@
         <meta http-equiv="refresh" content="" tal:attributes="content isBusy/refreshurl"/> 
        </span>
     </span>
+
       <tal:comment replace="nothing"> A slot where you can insert elements in the header from a template </tal:comment>
     </metal:headslot>
 
@@ -156,10 +159,14 @@
       prefer layouts that don't use tables.
       </tal:comment>
     <!-- <div tal:define="global hascluster request/clustername |nothing"/>  -->
-    <span tal:condition="hascluster">
-    <span tal:define="global ricci_agent python:here.getRicciAgentForCluster(request)"/>
-    <div tal:omit-tag="" tal:define="global modelb python:here.getmodelbuilder(ricci_agent)" />
-    </span>
+
+    <tal:block tal:condition="hascluster">
+	    <tal:block tal:define="global ricci_agent python: here.getRicciAgentForCluster(request)" />
+		<tal:block tal:condition="ricci_agent"
+			tal:define="
+				global modelb python:here.getmodelbuilder(ricci_agent)" />
+    </tal:block>
+
       <table id="portal-columns">
         <tbody>
           <tr>
--- conga/luci/cluster/resource-form-macros	2006/10/09 16:16:11	1.20
+++ conga/luci/cluster/resource-form-macros	2006/10/16 04:26:19	1.21
@@ -93,9 +93,8 @@
 	<h2>Resources Remove Form</h2>
 
 	<tal:block tal:define="
-		ragent python: here.getRicciAgentForCluster(request);
-		msg python: here.delResource(request, ragent)">
-		<div tal:condition="msg" tal:content="msg" />
+		msg python: here.delResource(here.getRicciAgentForCluster(request), request)">
+		<span class="error" tal:condition="msg" tal:content="msg" />
 	</tal:block>
 </div>
 
@@ -243,7 +242,7 @@
 	<h2>Resource <span tal:replace="python: ('edit' in request and request['edit']) and 'Edited' or 'Added'" /></h2>
 
 	<div tal:content="
-		python: here.addResource(request, here.getRicciAgentForCluster(request))" />
+		python: here.addResource(here.getRicciAgentForCluster(request), request)" />
 </div>
 
 <div metal:define-macro="resourceconfig-form">
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/10/10 21:07:18	1.7
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/10/16 04:26:19	1.8
@@ -10,8 +10,7 @@
 from HelperFunctions import access_to_host_allowed
 
 
-CERTS_DIR_PATH='/var/lib/luci/var/certs/'
-
+CERTS_DIR_PATH = '/var/lib/luci/var/certs/'
 
 class RicciCommunicator:
     def __init__(self, hostname, port=11111):
@@ -38,14 +37,12 @@
         self.__reported_hostname = hello.firstChild.getAttribute('hostname')
         self.__os = hello.firstChild.getAttribute('os')
         self.__dom0 = hello.firstChild.getAttribute('xen_host') == 'true'
-        
+
         pass
     
     
     def hostname(self):
         return self.__hostname
-    
-    
     def authed(self):
         return self.__authed
     def system_name(self):
@@ -126,9 +123,22 @@
                     batch_node = node.cloneNode(True)
         if batch_node == None:
             raise 'missing <batch/> in ricci\'s response'
+
         return batch_node
     
-    
+    def batch_run(self, batch_str, async=True):
+        try:
+            batch_xml_str = '<?xml version="1.0" ?><batch>' + batch_str + '</batch>'
+            batch_xml = minidom.parseString(batch_xml_str).firstChild
+        except:
+            return None
+
+        try:
+            ricci_xml = self.process_batch(batch_xml, async)
+        except:
+            return None
+        return ricci_xml
+
     def batch_report(self, batch_id):
         if not self.authed():
             raise 'not authenticated'
@@ -339,5 +349,5 @@
                     elif status == '5':
                         return -103, 'module removed from schedule'
     
-    raise 'no ' + str(module_num) + 'th module in the batch, or malformed response'
+    raise Exception, str('no ' + str(module_num) + 'th module in the batch, or malformed response')
 
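The batch_run() helper added to RicciCommunicator above wraps a bare <module> request string in a <batch> envelope and feeds it to process_batch(). A hedged usage sketch, assuming a reachable and already-authenticated ricci daemon on a placeholder host:

from ricci_communicator import RicciCommunicator

# 'node1.example.com' is a placeholder; ricci listens on port 11111 by default.
rc = RicciCommunicator('node1.example.com')

# The cluster-status request string used elsewhere in ricci_bridge.py.
batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'

# async=False asks for the result in the same round trip; batch_run()
# returns None if the batch could not be parsed or processed.
ricci_xml = rc.batch_run(batch_str, async=False)
if ricci_xml:
    print(ricci_xml.toxml())
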
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/13 17:12:41	1.31
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/16 04:26:19	1.32
@@ -8,6 +8,7 @@
 import cgi
 
 from ricci_defines import *
+from ricci_bridge import getClusterConf
 from ricci_communicator import RicciCommunicator
 from ricci_communicator import CERTS_DIR_PATH
 from clusterOS import resolveOSType
@@ -1237,42 +1238,6 @@
 def havePermEditPerms(self):
 	return isAdmin(self) 
 
-def getClusterConf(rc):
-	doc = xml.dom.minidom.Document()
-	batch = doc.createElement('batch')
-	module = doc.createElement('module')
-	module.setAttribute('name', 'cluster')
-	request = doc.createElement('request')
-	request.setAttribute('API_version', '1.0')
-	call = doc.createElement('function_call')
-	call.setAttribute('name', 'get_cluster.conf')
-	request.appendChild(call)
-	module.appendChild(request)
-	batch.appendChild(module)
-
-	# temporary workaround for ricci bug
-	system_info = rc.system_name()
-	rc = RicciCommunicator(system_info)
-	# end workaround
-
-	try:
-		ret = rc.process_batch(batch)
-	except Exception, e:
-		return str(e)
-
-	if not ret:
-		return None
-
-	cur = ret
-	while len(cur.childNodes) > 0:
-		for i in cur.childNodes:
-			if i.nodeType == xml.dom.Node.ELEMENT_NODE:
-				if i.nodeName == 'var' and i.getAttribute('name') == 'cluster.conf':
-					return i.childNodes[1].cloneNode(True)
-				else:
-					cur = i
-	return None
-
 def getClusterConfNodes(clusterConfDom):
 	cur = clusterConfDom
 	clusterNodes = list()
@@ -1303,3 +1268,35 @@
 	if storageList:
 		ret[1] = storageList
 	return ret
+
+def getClusterNode(self, nodename, clustername):
+	try:
+		cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + str(clustername) + '/' + str(nodename))
+		return cluster_node
+	except:
+		return None
+
+def getStorageNode(self, nodename):
+	try:
+		storage_node = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + '/' + str(nodename))
+		return storage_node
+	except:
+		return None
+
+def setNodeFlag(self, node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		node.manage_changeProperties({ 'flags': flags | flag_mask })
+	except:
+		try:
+			node.manage_addProperty('flags', flag_mask, 'int')
+		except:
+			pass
+
+def delNodeFlag(self, node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		if flags & flag_mask != 0:
+			node.manage_changeProperties({ 'flags': flags & ~flag_mask })
+	except:
+		pass
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/10/12 20:48:48	1.16
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/10/16 04:26:19	1.17
@@ -94,6 +94,7 @@
 REDIRECT_MSG="  You will be redirected in 5 seconds. Please fasten your safety restraints."
 
 
+# Homebase-specific constants
 HOMEBASE_ADD_USER="1"
 HOMEBASE_ADD_SYSTEM="2"
 HOMEBASE_PERMS="3"
@@ -102,4 +103,9 @@
 HOMEBASE_ADD_CLUSTER="6"
 HOMEBASE_ADD_CLUSTER_INITIAL="7"
 
+# Cluster node exception attribute flags
+CLUSTER_NODE_NEED_AUTH = 0x01
+CLUSTER_NODE_NOT_MEMBER = 0x02
+CLUSTER_NODE_ADDED = 0x04
+
 PLONE_ROOT='luci'
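The three CLUSTER_NODE_* values above are bit flags that setNodeFlag()/delNodeFlag() in homebase_adapters.py OR into, and mask out of, a node's 'flags' property. Stripped of the Zope property calls, the underlying arithmetic is roughly the following standalone sketch, with a plain integer standing in for the stored property:

CLUSTER_NODE_NEED_AUTH = 0x01
CLUSTER_NODE_NOT_MEMBER = 0x02
CLUSTER_NODE_ADDED = 0x04

flags = 0
flags |= CLUSTER_NODE_NEED_AUTH      # roughly setNodeFlag(node, CLUSTER_NODE_NEED_AUTH)
flags |= CLUSTER_NODE_ADDED          # roughly setNodeFlag(node, CLUSTER_NODE_ADDED)
flags &= ~CLUSTER_NODE_NEED_AUTH     # roughly delNodeFlag(node, CLUSTER_NODE_NEED_AUTH)

print(hex(flags))  # 0x4 -- only CLUSTER_NODE_ADDED remains set
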
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/13 17:04:11	1.27
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/16 04:26:19	1.28
@@ -1,689 +1,76 @@
-from time import *
-import os
-import sys
-from socket import *
 import xml
-import xml.dom
 from xml.dom import minidom
-from conga_constants import *
-from RicciReceiveError import RicciReceiveError
+from ricci_communicator import RicciCommunicator
 
+def checkBatch(rc, batch_id):
+	try:
+		batch = rc.batch_report(batch_id)
+		if batch is None:
+			return True
+	except:
+		return False
+
+	try:
+		batchid = batch.getAttribute('batch_id')
+		result = batch.getAttribute('status')
+	except:
+		return False
 
-class ricci_bridge:
-  def __init__(self, hostname, port=11111):
-    self.__hostname = hostname
-    self.__port = port
-    
-    
-  def process(self, xml_out):
-    CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="get_cluster.conf"/></request></module></batch></ricci>'
-        
-    docc = None
-    try:
-      doc = self.makeConnection(CLUSTER_STR)
-    except RicciReceiveError, r:
-      return None
-
-    #if doc == None:
-    #  print "Sorry, doc is None"
-    if doc != None:
-      bt_node = None
-      for node in doc.firstChild.childNodes:
-        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-          if node.nodeName == 'batch':
-            bt_node = node
-      if bt_node == None:
-        #print "bt_node == None"
-        doc = None
-      else:
-        #print doc.toxml()
-        mod_node = None
-        for node in bt_node.childNodes:
-            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                if node.nodeName == 'module':
-                    mod_node = node
-        if mod_node == None:
-            #print "mod_node == None"
-            doc = None
-        else:
-            resp_node = None
-            for node in mod_node.childNodes:
-                if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    resp_node = node
-            if resp_node == None:
-                #print "resp_node == None"
-                doc = None
-            else:
-                fr_node = None
-                for node in resp_node.childNodes:
-                    if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                      fr_node = node
-                if fr_node == None:
-                    #print "fr_node == None"
-                    doc = None
-                else:
-                    varnode = None
-                    for node in fr_node.childNodes:
-                      if node.nodeName == 'var':
-                          varnode = node
-                          break
-                    if varnode == None:
-                       #print "varnode == None"
-                       doc = None
-                    else:
-                      cl_node = None
-                      for node in varnode.childNodes:
-                          if node.nodeName == 'cluster':
-                              cl_node = node
-                              break
-                      if cl_node == None:
-                        #print "cl_node == None"
-                        doc = None
-                      else:
-                          docc = minidom.Document()
-                          docc.appendChild(cl_node)
-
-    return docc
-    
-  def __sendall(self, str, ssl_sock):
-    #print str
-    s = str
-    while len(s) != 0:
-        pos = ssl_sock.write(s)
-        s = s[pos:]
-    return
-
-    
-  def __receive(self, ssl_sock):
-    doc = None
-    xml_in = ''
-    try:
-        while True:
-            buff = ssl_sock.read(1024)
-            if buff == '':
-                break
-            xml_in += buff
-            try:
-                doc = minidom.parseString(xml_in)
-                break
-            except:
-                pass
-    except:
-        pass
-    try:
-        #print 'try parse xml'
-        doc = minidom.parseString(xml_in)
-        #print 'xml is good'
-    except:
-        pass
-    return doc
-
-  def getClusterStatus(self):
-    CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module></batch></ricci>'
-    # socket
-    sock = socket(AF_INET, SOCK_STREAM)
-    try:
-      sock.connect((self.__hostname, self.__port))
-    except:
-      sock.close()
-      return ''
-
-    ss = 0
-    try:
-        ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
-    except sslerror, e:
-        if ss:
-           del ss
-        sock.close()
-        return ''
-
-    # receive ricci header
-    hello = self.__receive(ss)
-    if hello != None:
-        pass
-        #print hello.toxml()
-
-    try:
-        self.__sendall(CLUSTER_STR, ss)
-        doc = self.__receive(ss)
-    except sslerror, e:
-        doc = None
-
-    del ss
-    sock.close()
-
-    if doc == None:
-      return ''
-      #print "Sorry, doc is None"
-
-    payload = self.extractPayload(doc)
-    return payload
-
-  def startService(self,servicename, preferrednode = None):
-    if preferrednode != None:
-      QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/><var mutable="false" name="nodename" type="string" value=\"' + preferrednode + '\" /></function_call></request></module></batch></ricci>'
-    else:
-      QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-      
-
-    batch_number, result = self.batchAttemptResult(payload)
-    return (batch_number, result)
-
-  def restartService(self,servicename):
-    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-      
-
-    batch_number, result = self.batchAttemptResult(payload)
-    return (batch_number, result)
-
-
-  def stopService(self,servicename):
-    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-      
-
-    batch_number, result = self.batchAttemptResult(payload)
-    return (batch_number, result)
-
-  def getDaemonStates(self, dlist):
-    CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">'
-                                                                                
-    for item in dlist:
-      CLUSTER_STR = CLUSTER_STR + '<service name=\"' + item + '\"/>'
-                                                                                
-    CLUSTER_STR = CLUSTER_STR + '</var></function_call></request></module></batch></ricci>'
-                                                                                
-    try:
-      payload = self.makeConnection(CLUSTER_STR)
-    except RicciReceiveError, r:
-      return None
-                                                                                
-    result = self.extractDaemonInfo(payload)
-                                                                              
-    return result
-                                                                              
-  def makeConnection(self,query_str):
-    # socket
-    sock = socket(AF_INET, SOCK_STREAM)
-    try:
-        sock.connect((self.__hostname, self.__port))
-    except:
-        sock.close()
-        return None
-
-    ss = 0
-    try:
-        ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
-        hello = self.__receive(ss)
-        #print >> sys.stderr, hello.toxml()
-        self.__sendall(query_str, ss)
-        # receive response
-        payload = self.__receive(ss)
-    except sslerror, e:
-        payload = None
-
-    if ss: 
-        del ss
-    sock.close()
-
-    if payload == None:
-      raise RicciReceiveError('FATAL',"Unable to receive ricci data for %s" % self.__hostname)
-    return payload
-
-
-  def extractPayload(self, doc):
-    docc = None
-    bt_node = None
-    for node in doc.firstChild.childNodes:
-      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-        if node.nodeName == 'batch':
-          bt_node = node
-    if bt_node == None:
-      doc = None
-    else:
-      #print doc.toxml()
-      mod_node = None
-      for node in bt_node.childNodes:
-          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'module':
-                  mod_node = node
-      if mod_node == None:
-          doc = None
-      else:
-          resp_node = None
-          for node in mod_node.childNodes:
-              if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                  resp_node = node
-          if resp_node == None:
-              doc = None
-          else:
-              fr_node = None
-              for node in resp_node.childNodes:
-                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    fr_node = node
-              if fr_node == None:
-                  doc = None
-              else:
-                  varnode = None
-                  for node in fr_node.childNodes:
-                    if node.nodeName == 'var':
-                        varnode = node
-                        break
-                  if varnode == None:
-                     doc = None
-                  else:
-                    cl_node = None
-                    for node in varnode.childNodes:
-                        if node.nodeName == 'cluster':
-                            cl_node = node
-                            break
-                    if cl_node == None:
-                      doc = None
-                    else:
-                        docc = minidom.Document()
-                        docc.appendChild(cl_node)
-    return docc
-
-
-  def getBatchResult(self, doc):
-    docc = None
-    bt_node = None
-    for node in doc.firstChild.childNodes:
-      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-        if node.nodeName == 'batch':
-          bt_node = node
-    if bt_node == None:
-      doc = None
-    else:
-      #print doc.toxml()
-      mod_node = None
-      for node in bt_node.childNodes:
-          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'module':
-                  mod_node = node
-      if mod_node == None:
-          doc = None
-      else:
-          resp_node = None
-          for node in mod_node.childNodes:
-              if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                  resp_node = node
-          if resp_node == None:
-              doc = None
-          else:
-              fr_node = None
-              for node in resp_node.childNodes:
-                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    fr_node = node
-              if fr_node == None:
-                  doc = None
-              else:
-                  varnode = None
-                  for node in fr_node.childNodes:
-                    if node.nodeName == 'var':
-                        varnode = node
-                        break
-                  if varnode == None:
-                     doc = None
-                  else:
-                    cl_node = None
-                    for node in varnode.childNodes:
-                        if node.nodeName == 'cluster':
-                            cl_node = node
-                            break
-                    if cl_node == None:
-                      doc = None
-                    else:
-                      docc = minidom.Document()
-                      docc.appendChild(cl_node)
-    return docc
-
-  def extractClusterConf(self, doc):
-    docc = None
-    bt_node = None
-    for node in doc.firstChild.childNodes:
-      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-        if node.nodeName == 'batch':
-          bt_node = node
-    if bt_node == None:
-      #print "bt_node == None"
-      doc = None
-    else:
-      #print doc.toxml()
-      mod_node = None
-      for node in bt_node.childNodes:
-          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'module':
-                  mod_node = node
-      if mod_node == None:
-          #print "mod_node == None"
-          doc = None
-      else:
-          resp_node = None
-          for node in mod_node.childNodes:
-              if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                  resp_node = node
-          if resp_node == None:
-              #print "resp_node == None"
-              doc = None
-          else:
-              fr_node = None
-              for node in resp_node.childNodes:
-                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    fr_node = node
-              if fr_node == None:
-                  #print "fr_node == None"
-                  doc = None
-              else:
-                  varnode = None
-                  for node in fr_node.childNodes:
-                    if node.nodeName == 'var':
-                        varnode = node
-                        break
-                  if varnode == None:
-                     #print "varnode == None"
-                     doc = None
-                  else:
-                    cl_node = None
-                    for node in varnode.childNodes:
-                        if node.nodeName == 'cluster':
-                            cl_node = node
-                            break
-                    if cl_node == None:
-                      #print "cl_node == None"
-                      doc = None
-                    else:
-                        docc = minidom.Document()
-                        docc.appendChild(cl_node)
-
-    return docc
-
-  def extractDaemonInfo(self, doc):
-    resultlist = list()
-    docc = None
-    bt_node = None
-    for node in doc.firstChild.childNodes:
-      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-        if node.nodeName == 'batch':
-          bt_node = node
-    if bt_node == None:
-      #print "bt_node == None"
-      doc = None
-    else:
-      #print doc.toxml()
-      mod_node = None
-      for node in bt_node.childNodes:
-          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'module':
-                  mod_node = node
-      if mod_node == None:
-          #print "mod_node == None"
-          doc = None
-      else:
-          resp_node = None
-          for node in mod_node.childNodes:
-              if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                  resp_node = node
-          if resp_node == None:
-              #print "resp_node == None"
-              doc = None
-          else:
-              fr_node = None
-              for node in resp_node.childNodes:
-                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    fr_node = node
-              if fr_node == None:
-                  #print "fr_node == None"
-                  doc = None
-              else:
-                  varnode = None
-                  for node in fr_node.childNodes:
-                    if node.nodeName == 'var':
-                        varnode = node
-                        break
-                  if varnode == None:
-                     #print "varnode == None"
-                     doc = None
-                  else:
-                    svc_node = None
-                    for node in varnode.childNodes:
-                        if node.nodeName == 'service':
-                            svchash = {}
-                            svchash['name'] = node.getAttribute('name')
-                            svchash['enabled'] = node.getAttribute('enabled')
-                            svchash['running'] = node.getAttribute('running')
-                            resultlist.append(svchash)
-                                                                          
-    return resultlist
-
-  def batchAttemptResult(self, doc):
-    docc = None
-    rc_node = None
-    for node in doc.firstChild.childNodes:
-        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-            if node.nodeName == 'batch':
-                #get batch number and status code
-                batch_number = node.getAttribute('batch_id')
-                result = node.getAttribute('status')
-                return (batch_number, result)
-            else:
-                #print "RETURNING NONE!!!"
-                return (None, None )
-
-
-        
-  def getRicciResponse(self):
-    sock = socket(AF_INET, SOCK_STREAM)
-    sock.settimeout(2.0)
-    try:
-      sock.connect((self.__hostname, self.__port))
-    except:
-      sock.close()
-      return False
-
-    ss = 0
-    try:
-        ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
-    except sslerror, e:
-        if ss:
-           del ss
-        sock.close()
-        return False
-    sock.settimeout(600.0) # 10 minutes
-    # TODO: data transfer timeout should be much less, 
-    # leave until all calls are async ricci calls
-
-    # receive ricci header
-    try:
-      hello = self.__receive(ss)
-    except sslerror, e:
-      hello = None
-
-    del ss
-    sock.close()
-
-    if hello != None:
-      return True
-    else:
-      return False
-
-  def checkBatch(self, batch_id):
-    QUERY_STR = '<?xml version="1.0" ?><ricci version="1.0" function="batch_report" batch_id="' + batch_id + '"/>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-        
-    #return true if finished or not present
-    success = payload.firstChild.getAttribute('success')
-    if success != "0":
-        return True  #I think this is ok...if id cannot be found
-    for node in payload.firstChild.childNodes:
-        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-            if node.nodeName == 'batch':
-                #get batch number and status code
-                batch_number = node.getAttribute('batch_id')
-                result = node.getAttribute('status')
-                if result == "0":
-                    return True
-                else:
-                    return False
-            else:
-                return False
-
-    return False
-
-  def setClusterConf(self, clusterconf, propagate=True):
-    if propagate == True:
-      propg = "true"
-    else:
-      propg = "false"
-
-    conf = str(clusterconf).replace('<?xml version="1.0"?>', '')
-    conf = conf.replace('<?xml version="1.0" ?>', '')
-    conf = conf.replace('<? xml version="1.0"?>', '')
-    conf = conf.replace('<? xml version="1.0" ?>', '')
-    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"><var type="boolean" name="propagate" mutable="false" value="' + propg + '"/><var type="xml" mutable="false" name="cluster.conf">' + conf + '</var></function_call></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-                                                                            
-                                                                            
-    batch_number, result = self.batchAttemptResult(payload)
-    return (batch_number, result)
-
-  def nodeLeaveCluster(self, cluster_shutdown=False):
-    cshutdown = "false"
-    if cluster_shutdown == True:
-      cshutdown = "true"
-    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="111" API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="' + cshutdown + '"/></function_call></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-
-    batch_number, result = self.batchAttemptResult(payload)
-
-    return (batch_number, result)
-                                                                            
-  def nodeJoinCluster(self, cluster_startup=False):
-    cstartup = "false"
-    if cluster_startup == True:
-      cstartup = "true"
-    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="111" API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="' + cstartup + '"/></function_call></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-
-    batch_number, result = self.batchAttemptResult(payload)
-
-    return (batch_number, result)
-
-  def nodeReboot(self):
-    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="reboot"><request sequence="111" API_version="1.0"><function_call name="reboot_now"/></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-
-    batch_number, result = self.batchAttemptResult(payload)
-
-    return (batch_number, result)
-
-  def nodeFence(self, nodename):
-    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="111" API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="' + nodename + '"/></function_call></request></module></batch></ricci>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return None
-
-    batch_number, result = self.batchAttemptResult(payload)
-
-    return (batch_number, result)
-
-  def getNodeLogs(self):
-    QUERY_STR = '<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="log"><request sequence="1254" API_version="1.0"><function_call name="get"><var mutable="false" name="age" type="int" value="360000"/><var mutable="false" name="tags" type="list_str"><listentry value="cluster"/></var></function_call></request></module>'
-
-    try:
-      payload = self.makeConnection(QUERY_STR)
-    except RicciReceiveError, r:
-      return "log not accessible"
+	if result == '0':
+		return True
 
-    #parse out log entry  
-    return payload
+	return False
 
 def addClusterNodeBatch(cluster_name,
-                        install_base,
-                        install_services,
-                        install_shared_storage,
-                        install_LVS,
-                        upgrade_rpms):
+						install_base,
+						install_services,
+						install_shared_storage,
+						install_LVS,
+						upgrade_rpms):
+
 	batch = '<?xml version="1.0" ?>'
 	batch += '<batch>'
-        
-        
 	batch += '<module name="rpm">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="install">'
 	batch += '<var name="upgrade" type="boolean" value="'
-        if upgrade_rpms:
-          batch += 'true'
-        else:
-          batch += 'false'
-        batch += '"/>'
+	if upgrade_rpms:
+		batch += 'true'
+	else:
+		batch += 'false'
+	batch += '"/>'
+
 	batch += '<var name="sets" type="list_xml">'
-        if install_base or install_services or install_shared_storage:
-          batch += '<set name="Cluster Base"/>'
-        if install_services:
-          batch += '<set name="Cluster Service Manager"/>'
+
+	if install_base or install_services or install_shared_storage:
+		batch += '<set name="Cluster Base"/>'
+	if install_services:
+		batch += '<set name="Cluster Service Manager"/>'
 	if install_shared_storage:
-          batch += '<set name="Clustered Storage"/>'
+		batch += '<set name="Clustered Storage"/>'
 	if install_LVS:
-          batch += '<set name="Linux Virtual Server"/>'
+		batch += '<set name="Linux Virtual Server"/>'
+
 	batch += '</var>'
 	batch += '</function_call>'
 	batch += '</request>'
 	batch += '</module>'
-        
-        
+		
 	need_reboot = install_base or install_services or install_shared_storage or install_LVS
-        if need_reboot:
-          batch += '<module name="reboot">'
-          batch += '<request API_version="1.0">'
-          batch += '<function_call name="reboot_now"/>'
-          batch += '</request>'
-          batch += '</module>'
-        else:
-          # need placeholder instead of reboot
-          batch += '<module name="rpm">'
-          batch += '<request API_version="1.0">'
-          batch += '<function_call name="install"/>'
-          batch += '</request>'
-          batch += '</module>'
-        
-        
+	if need_reboot:
+		batch += '<module name="reboot">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="reboot_now"/>'
+		batch += '</request>'
+		batch += '</module>'
+	else:
+		# need placeholder instead of reboot
+		batch += '<module name="rpm">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="install"/>'
+		batch += '</request>'
+		batch += '</module>'
+		
 	batch += '<module name="cluster">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="set_cluster.conf">'
@@ -700,121 +87,389 @@
 	batch += '</function_call>'
 	batch += '</request>'
 	batch += '</module>'
-        
-        
+
 	batch += '<module name="cluster">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="start_node"/>'
 	batch += '</request>'
 	batch += '</module>'
-        
-        
 	batch += '</batch>'
 
 	return minidom.parseString(batch).firstChild
 
-def createClusterBatch(os_str,
-                       cluster_name,
-                       cluster_alias,
-                       nodeList,
-                       install_base,
-                       install_services,
-                       install_shared_storage,
-                       install_LVS,
-                       upgrade_rpms):
-    batch = '<?xml version="1.0" ?>'
-    batch += '<batch>'
-
-    if os_str == 'rhel5':
-        cluster_version = '5'
-    elif os_str == 'rhel4':
-        cluster_version = '4'
-    else:
-        cluster_version = 'unknown'
-
-    batch += '<module name="rpm">'
-    batch += '<request API_version="1.0">'
-    batch += '<function_call name="install">'
-    batch += '<var name="upgrade" type="boolean" value="'
-    if upgrade_rpms:
-      batch += 'true'
-    else:
-      batch += 'false'
-    batch += '"/>'
-    batch += '<var name="sets" type="list_xml">'
-    if install_base or install_services or install_shared_storage:
-      batch += '<set name="Cluster Base"/>'
-    if install_services:
-      batch += '<set name="Cluster Service Manager"/>'
-    if install_shared_storage:
-      batch += '<set name="Clustered Storage"/>'
-    if install_LVS:
-      batch += '<set name="Linux Virtual Server"/>'
-    batch += '</var>'
-    batch += '</function_call>'
-    batch += '</request>'
-    batch += '</module>'
-    
-    
-    need_reboot = install_base or install_services or install_shared_storage or install_LVS
-    if need_reboot:
-      batch += '<module name="reboot">'
-      batch += '<request API_version="1.0">'
-      batch += '<function_call name="reboot_now"/>'
-      batch += '</request>'
-      batch += '</module>'
-    else:
-      # need placeholder instead of reboot
-      batch += '<module name="rpm">'
-      batch += '<request API_version="1.0">'
-      batch += '<function_call name="install"/>'
-      batch += '</request>'
-      batch += '</module>'
-    
-      
-    batch += '<module name="cluster">'
-    batch += '<request API_version="1.0">'
-    batch += '<function_call name="set_cluster.conf">'
-    batch += '<var mutable="false" name="propagate" type="boolean" value="false"/>'
-    batch += '<var mutable="false" name="cluster.conf" type="xml">'
-    batch += '<cluster config_version="1" name="' + cluster_name + '" alias="' + cluster_alias + '">'
-    batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
-
-    batch += '<clusternodes>'
-    x = 1
-    for i in nodeList:
-        if os_str == "rhel4":
-          batch += '<clusternode name="' + i + '" votes="1" />'
-        else:
-          batch += '<clusternode name="' + i + '" votes="1" nodeid="' + str(x) + '" />'
-        x = x + 1
-
-    batch += '</clusternodes>'
-
-    if len(nodeList) == 2:
-        batch += '<cman expected_votes="1" two_node="1"/>'
-    else:
-        batch += '<cman/>'
+def createClusterBatch(	os_str,
+						cluster_name,
+						cluster_alias,
+						nodeList,
+						install_base,
+						install_services,
+						install_shared_storage,
+						install_LVS,
+						upgrade_rpms):
+
+	if os_str == 'rhel5':
+		cluster_version = '5'
+	elif os_str == 'rhel4':
+		cluster_version = '4'
+	else:
+		cluster_version = 'unknown'
+
+	batch = '<?xml version="1.0" ?>'
+	batch += '<batch>'
+	batch += '<module name="rpm">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="install">'
+	batch += '<var name="upgrade" type="boolean" value="'
+	if upgrade_rpms:
+		batch += 'true'
+	else:
+		batch += 'false'
+	batch += '"/>'
+	batch += '<var name="sets" type="list_xml">'
+
+	if install_base or install_services or install_shared_storage:
+		batch += '<set name="Cluster Base"/>'
+	if install_services:
+		batch += '<set name="Cluster Service Manager"/>'
+	if install_shared_storage:
+		batch += '<set name="Clustered Storage"/>'
+	if install_LVS:
+		batch += '<set name="Linux Virtual Server"/>'
+	batch += '</var>'
+	batch += '</function_call>'
+	batch += '</request>'
+	batch += '</module>'
+
+	need_reboot = install_base or install_services or install_shared_storage or install_LVS
+	if need_reboot:
+		batch += '<module name="reboot">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="reboot_now"/>'
+		batch += '</request>'
+		batch += '</module>'
+	else:
+		# need placeholder instead of reboot
+		batch += '<module name="rpm">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="install"/>'
+		batch += '</request>'
+		batch += '</module>'
+
+	batch += '<module name="cluster">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="set_cluster.conf">'
+	batch += '<var mutable="false" name="propagate" type="boolean" value="false"/>'
+	batch += '<var mutable="false" name="cluster.conf" type="xml">'
+	batch += '<cluster config_version="1" name="' + cluster_name + '" alias="' + cluster_alias + '">'
+	batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
+
+	batch += '<clusternodes>'
+
+	x = 1
+	for i in nodeList:
+		if os_str == "rhel4":
+			batch += '<clusternode name="' + i + '" votes="1" />'
+		else:
+			batch += '<clusternode name="' + i + '" votes="1" nodeid="' + str(x) + '" />'
+		x = x + 1
+
+	batch += '</clusternodes>'
+
+	if len(nodeList) == 2:
+		batch += '<cman expected_votes="1" two_node="1"/>'
+	else:
+		batch += '<cman/>'
  
-    batch += '<fencedevices/>'
-    batch += '<rm/>'
-    batch += '</cluster>'
-    batch += '</var>'
-    batch += '</function_call>'
-    batch += '</request>'
-    batch += '</module>'
-    
-    
-    batch += '<module name="cluster">'
-    batch += '<request API_version="1.0">'
-    batch += '<function_call name="start_node">'
-    batch += '<var mutable="false" name="cluster_startup" type="boolean" value="true"/>'
-    batch += '</function_call>'
-    batch += '</request>'
-    batch += '</module>'
-    
-    
-    batch += '</batch>'
-    
-    return minidom.parseString(batch).firstChild
+	batch += '<fencedevices/>'
+	batch += '<rm/>'
+	batch += '</cluster>'
+	batch += '</var>'
+	batch += '</function_call>'
+	batch += '</request>'
+	batch += '</module>'
+
+	batch += '<module name="cluster">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="start_node">'
+	batch += '<var mutable="false" name="cluster_startup" type="boolean" value="true"/>'
+	batch += '</function_call>'
+	batch += '</request>'
+	batch += '</module>'
+	batch += '</batch>'
+
+	return minidom.parseString(batch).firstChild
 
+def batchAttemptResult(self, doc):
+	docc = None
+	rc_node = None
+
+	for node in doc.firstChild.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			if node.nodeName == 'batch':
+				#get batch number and status code
+				batch_number = node.getAttribute('batch_id')
+				result = node.getAttribute('status')
+				return (batch_number, result)
+			else:
+				#print "RETURNING NONE!!!"
+				return (None, None)
+
+def getPayload(bt_node):
+	if not bt_node:
+		return None
+
+	mod_node = None
+	for node in bt_node.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE and node.nodeName == 'module':
+			mod_node = node
+	if not mod_node:
+		return None
+
+	resp_node = None
+	for node in mod_node.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			resp_node = node
+	if not resp_node:
+		return None
+
+	fr_node = None
+	for node in resp_node.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			fr_node = node
+	if not fr_node:
+		return None
+
+	varnode = None
+	for node in fr_node.childNodes:
+		if node.nodeName == 'var':
+			varnode = node
+			break
+	if not varnode:
+		return None
+
+	cl_node = None
+	for node in varnode.childNodes:
+		if node.nodeName == 'cluster':
+			cl_node = node
+			break
+	if not cl_node:
+		return None
+
+	doc = minidom.Document()
+	doc.appendChild(cl_node)
+	return doc
+
+def setClusterConf(rc, clusterconf, propagate=True):
+	if propagate == True:
+		propg = 'true'
+	else:
+		propg = 'false'
+
+	conf = str(clusterconf).replace('<?xml version="1.0"?>', '')
+	conf = conf.replace('<?xml version="1.0" ?>', '')
+	conf = conf.replace('<? xml version="1.0"?>', '')
+	conf = conf.replace('<? xml version="1.0" ?>', '')
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"><var type="boolean" name="propagate" mutable="false" value="' + propg + '"/><var type="xml" mutable="false" name="cluster.conf">' + conf + '</var></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def getNodeLogs(rc):
+	errstr = 'log not accessible'
+
+	batch_str = '<module name="log"><request sequence="1254" API_version="1.0"><function_call name="get"><var mutable="false" name="age" type="int" value="360000"/><var mutable="false" name="tags" type="list_str"><listentry value="cluster"/></var></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return errstr
+	return doc.firstChild
+
+def nodeReboot(rc):
+	batch_str = '<module name="reboot"><request sequence="111" API_version="1.0"><function_call name="reboot_now"/></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def nodeLeaveCluster(rc, cluster_shutdown=False, purge=True):
+	cshutdown = 'false'
+	if cluster_shutdown == True:
+		cshutdown = 'true'
+
+	purge_conf = 'true'
+	if purge == False:
+		purge_conf = 'false'
+
+	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="' + cshutdown + '"/><var mutable="false" name="purge_conf" type="boolean" value="' + purge_conf + '"/></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def nodeFence(rc, nodename):
+	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="' + nodename + '"/></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def nodeJoinCluster(rc, cluster_startup=False):
+	cstartup = 'false'
+	if cluster_startup == True:
+		cstartup = 'true'
+
+	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="' + cstartup + '"/></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def startService(rc, servicename, preferrednode=None):
+	if preferrednode != None:
+		batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/><var mutable="false" name="nodename" type="string" value=\"' + preferrednode + '\" /></function_call></request></module>'
+	else:
+		batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def restartService(rc, servicename):
+	batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def stopService(rc, servicename):
+	batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return (None, None)
+	return batchAttemptResult(doc)
+
+def getDaemonStates(rc, dlist):
+	batch_str = '<module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">'
+
+	for item in dlist:
+		batch_str += '<service name=\"' + item + '\"/>'
+
+	batch_str += '</var></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml:
+		return None
+	result = extractDaemonInfo(ricci_xml)
+	return result
+
+def extractDaemonInfo(bt_node):
+	if not bt_node:
+		return None
+
+	mod_node = None
+	for node in bt_node.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			if node.nodeName == 'module':
+				mod_node = node
+	if not mod_node:
+		return None
+
+	resp_node = None
+	for node in mod_node.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			resp_node = node
+	if not resp_node:
+		return None
+
+	fr_node = None
+	for node in resp_node.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			fr_node = node
+	if not fr_node:
+		return None
+
+	varnode = None
+	for node in fr_node.childNodes:
+		if node.nodeName == 'var':
+			varnode = node
+			break
+	if not varnode:
+		return None
+
+	resultlist = list()
+	svc_node = None
+	for node in varnode.childNodes:
+		if node.nodeName == 'service':
+			svchash = {}
+			try:
+				name = node.getAttribute('name')
+				if not name:
+					raise
+			except:
+				name = '[unknown]'
+			svchash['name'] = name
+
+			try:
+				svc_enabled = node.getAttribute('enabled')
+			except:
+				svc_enabled = '[unknown]'
+			svchash['enabled'] = svc_enabled
+
+			try:
+				running = node.getAttribute('running')
+			except:
+				running = '[unknown]'
+			svchash['running'] = running
+			resultlist.append(svchash)
+
+	return resultlist
+
+def getClusterConf(rc):
+	doc = xml.dom.minidom.Document()
+	batch = doc.createElement('batch')
+	module = doc.createElement('module')
+	module.setAttribute('name', 'cluster')
+	request = doc.createElement('request')
+	request.setAttribute('API_version', '1.0')
+	call = doc.createElement('function_call')
+	call.setAttribute('name', 'get_cluster.conf')
+	request.appendChild(call)
+	module.appendChild(request)
+	batch.appendChild(module)
+
+	try:
+		ret = rc.process_batch(batch)
+	except Exception, e:
+		return None
+
+	if not ret:
+		return None
+
+	cur = ret
+	while len(cur.childNodes) > 0:
+		for i in cur.childNodes:
+			if i.nodeType == xml.dom.Node.ELEMENT_NODE:
+				if i.nodeName == 'var' and i.getAttribute('name') == 'cluster.conf':
+					return i.childNodes[1].cloneNode(True)
+				else:
+					cur = i
+	return None
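
A minimal usage sketch (not part of the patch) of how the new module-level
ricci_bridge helpers above fit together; the host name below is hypothetical,
while RicciCommunicator, getClusterConf, setClusterConf and batchAttemptResult
are the names this revision defines or relies on:

    # sketch only: fetch the running cluster.conf from one node, push it back,
    # and read the (batch_id, status) pair that batchAttemptResult extracts
    rc = RicciCommunicator('node1.example.com')     # hypothetical ricci host
    conf_node = getClusterConf(rc)                  # <cluster/> DOM node or None
    if conf_node is not None:
        batch_number, result = setClusterConf(rc, conf_node.toxml(), propagate=True)
        if batch_number is None:
            pass    # (None, None) means the ricci payload could not be parsed
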
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/13 22:56:28	1.110
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 04:26:19	1.111
@@ -6,6 +6,7 @@
 from conga_constants import *
 from ricci_bridge import *
 from ricci_communicator import *
+from string import lower
 import time
 import Products.ManagedSystem
 from Products.Archetypes.utils import make_uuid
@@ -20,7 +21,7 @@
 from clusterOS import resolveOSType
 from GeneralError import GeneralError
 from UnknownClusterError import UnknownClusterError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster
+from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -103,6 +104,7 @@
 
 	return [errors, cluster_properties]
 
+
 def validateCreateCluster(self, request):
 	errors = list()
 	messages = list()
@@ -117,7 +119,7 @@
 		sessionData = None
 
 	if not 'clusterName' in request.form or not request.form['clusterName']:
-		return (False, {'errors': [ 'No cluster name was specified.' ] })
+		return (False, {'errors': [ 'No cluster name was specified.' ]})
 	clusterName = request.form['clusterName']
 
 	try:
@@ -177,14 +179,15 @@
 
 	if cluster_properties['isComplete'] == True:
 		batchNode = createClusterBatch(cluster_os,
-					       clusterName,
-					       clusterName,
-					       map(lambda x: x['ricci_host'], nodeList),
-					       True,
-					       True,
-					       enable_storage,
-					       False,
-					       rhn_dl)
+						clusterName,
+						clusterName,
+						map(lambda x: x['ricci_host'], nodeList),
+						True,
+						True,
+						enable_storage,
+						False,
+						rhn_dl)
+
 		if not batchNode:
 			nodeUnauth(nodeList)
 			cluster_properties['isComplete'] = False
@@ -210,7 +213,6 @@
 				cluster_properties['isComplete'] = False
 				errors.append('An error occurred while attempting to add cluster node \"' + i['ricci_host'] + '\"')
 				return (False, {'errors': errors, 'requestResults':cluster_properties })
-
 		buildClusterCreateFlags(self, batch_id_map, clusterName)
 
 	messages.append('Creation of cluster \"' + clusterName + '\" has begun')
@@ -1203,248 +1205,338 @@
   return clist
 
 def cluster_permission_check(cluster):
-  #Does this take too long?
-  sm = AccessControl.getSecurityManager()
-  user =  sm.getUser()
-  if user.has_permission("View",cluster):
-    return True
+	#Does this take too long?
+	try:
+		sm = AccessControl.getSecurityManager()
+		user = sm.getUser()
+		if user.has_permission('View', cluster):
+			return True
+	except:
+		pass
+	return False
 
-  return False
+def getRicciAgent(self, clustername):
+	#Check cluster permission here! return none if false
+	path = CLUSTER_FOLDER_PATH + clustername
 
-def getRicciAgentForCluster(self, req):
-  clustername = req['clustername']
-  #Check cluster permission here! return none if false
-  path = CLUSTER_FOLDER_PATH + clustername
-  clusterfolder = self.restrictedTraverse(path)
-  if clusterfolder != None:
-    nodes = clusterfolder.objectItems('Folder')
-    for node in nodes:
-      rb = ricci_bridge(node[1].getId())
-      if rb.getRicciResponse() == True:
-        return node[1].getId()
-    return None
-  else:
-    return None
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			raise
+		nodes = clusterfolder.objectItems('Folder')
+		if len(nodes) < 1:
+			return None
+	except:
+		return None
 
-def getRicciAgent(self, clustername):
-  #Check cluster permission here! return none if false
-  path = CLUSTER_FOLDER_PATH + clustername[0]
-  clusterfolder = self.restrictedTraverse(path)
-  if clusterfolder != None:
-    nodes = clusterfolder.objectItems('Folder')
-    for node in nodes:
-      rb = ricci_bridge(node[1].getId())
-      if rb.getRicciResponse() == True:
-        return node[1].getId()
-    return ""
-  else:
-    return ""
+	cluname = lower(clustername)
 
+	for node in nodes:
+		try:
+			hostname = node[1].getId()
+		except:
+			try:
+				hostname = node[0]
+			except:
+				continue
 
-def getClusterStatus(self, ricci_name):
-  rb = ricci_bridge(ricci_name)
-  doc = rb.getClusterStatus()
-  results = list()
+		try:
+			rc = RicciCommunicator(hostname)
+			if not rc:
+				raise
+		except:
+			#raise Exception, ('unable to communicate with the ricci agent on %s', hostname)
+			continue
 
-  if not doc or not doc.firstChild:
-    return {}
+		try:
+			clu_info = rc.cluster_info()
+			if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
+				# node reports it's in a different cluster
+				raise
+		except:
+			continue
 
-  vals = {}
-  vals['type'] = "cluster"
-  try:
-    vals['alias'] = doc.firstChild.getAttribute('alias')
-  except AttributeError, e:
-    vals['alias'] = doc.firstChild.getAttribute('name')
-  vals['votes'] = doc.firstChild.getAttribute('votes')
-  vals['name'] = doc.firstChild.getAttribute('name')
-  vals['minQuorum'] = doc.firstChild.getAttribute('minQuorum')
-  vals['quorate'] = doc.firstChild.getAttribute('quorate')
-  results.append(vals)
-  for node in doc.firstChild.childNodes:
-    if node.nodeName == 'node':
-      vals = {}
-      vals['type'] = "node"
-      vals['clustered'] = node.getAttribute('clustered')
-      vals['name'] = node.getAttribute('name')
-      vals['online'] = node.getAttribute('online')
-      vals['uptime'] = node.getAttribute('uptime')
-      vals['votes'] = node.getAttribute('votes')
-      results.append(vals)
-    elif node.nodeName == 'service':
-      vals = {}
-      vals['type'] = 'service'
-      vals['name'] = node.getAttribute('name')
-      vals['nodename'] = node.getAttribute('nodename')
-      vals['running'] = node.getAttribute('running')
-      vals['failed'] = node.getAttribute('failed')
-      vals['autostart'] = node.getAttribute('autostart')
-      results.append(vals)
+		if rc.authed():
+			return rc
+		setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+	return None
 
-  return results
+def getRicciAgentForCluster(self, req):
+	try:
+		clustername = req['clustername']
+	except KeyError, e:
+		try:
+			clustername = req.form['clusterName']
+			if not clustername:
+				raise
+		except:
+			return None
+	return getRicciAgent(self, clustername)
+
+def getClusterStatus(self, rc):
+	clustatus_batch ='<?xml version="1.0" ?><batch><module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module></batch>'
+	try:
+		clustatuscmd_xml = minidom.parseString(clustatus_batch).firstChild
+	except:
+		return {}
+
+	try:
+		ricci_xml = rc.process_batch(clustatuscmd_xml, async=False)
+	except:
+		return {}
+
+	doc = getPayload(ricci_xml)
+	if not doc or not doc.firstChild:
+		return {}
+	results = list()
+
+	vals = {}
+	vals['type'] = "cluster"
+
+	try:
+		vals['alias'] = doc.firstChild.getAttribute('alias')
+	except AttributeError, e:
+		vals['alias'] = doc.firstChild.getAttribute('name')
+
+	vals['votes'] = doc.firstChild.getAttribute('votes')
+	vals['name'] = doc.firstChild.getAttribute('name')
+	vals['minQuorum'] = doc.firstChild.getAttribute('minQuorum')
+	vals['quorate'] = doc.firstChild.getAttribute('quorate')
+	results.append(vals)
+
+	for node in doc.firstChild.childNodes:
+		if node.nodeName == 'node':
+			vals = {}
+			vals['type'] = "node"
+			vals['clustered'] = node.getAttribute('clustered')
+			vals['name'] = node.getAttribute('name')
+			vals['online'] = node.getAttribute('online')
+			vals['uptime'] = node.getAttribute('uptime')
+			vals['votes'] = node.getAttribute('votes')
+			results.append(vals)
+		elif node.nodeName == 'service':
+			vals = {}
+			vals['type'] = 'service'
+			vals['name'] = node.getAttribute('name')
+			vals['nodename'] = node.getAttribute('nodename')
+			vals['running'] = node.getAttribute('running')
+			vals['failed'] = node.getAttribute('failed')
+			vals['autostart'] = node.getAttribute('autostart')
+			results.append(vals)
+	return results
 
 def getServicesInfo(self, status, modelb, req):
-  map = {}
-  maplist = list()
-  baseurl = req['URL']
-  cluname = req['clustername']
-  for item in status:
-    if item['type'] == "service":
-      itemmap = {}
-      itemmap['name'] = item['name']
-      if item['running'] == "true":
-        itemmap['running'] = "true"
-        itemmap['nodename'] = item['nodename']
-      itemmap['autostart'] = item['autostart']
-      itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE
-      svc = modelb.retrieveServiceByName(item['name'])
-      dom = svc.getAttribute("domain")
-      if dom != None:
-        itemmap['faildom'] = dom
-      else:
-        itemmap['faildom'] = "No Failover Domain"
+	map = {}
+	maplist = list()
+
+	try:
+		baseurl = req['URL']
+		if not baseurl:
+			raise KeyError, 'is blank'
+	except KeyError, e:
+		baseurl = '.'
 
-      maplist.append(itemmap)
+	try:
+		cluname = req['clustername']
+		if not cluname:
+			raise KeyError, 'is blank'
+	except KeyError, e:
+		try:
+			cluname = req.form['clusterName']
+			if not cluname:
+				raise
+		except:
+			cluname = '[error retrieving cluster name]'
 
-  map['services'] = maplist
+	for item in status:
+		if item['type'] == "service":
+			itemmap = {}
+			itemmap['name'] = item['name']
+			if item['running'] == "true":
+				itemmap['running'] = "true"
+				itemmap['nodename'] = item['nodename']
+			itemmap['autostart'] = item['autostart']
+			itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE
+
+			svc = modelb.retrieveServiceByName(item['name'])
+			dom = svc.getAttribute("domain")
+			if dom != None:
+				itemmap['faildom'] = dom
+			else:
+				itemmap['faildom'] = "No Failover Domain"
+			maplist.append(itemmap)
 
-  return map
+	map['services'] = maplist
+	return map
 
-def getServiceInfo(self,status,modelb,req):
-  #set up struct for service config page
-  baseurl = req['URL']
-  cluname = req['clustername']
-  hmap = {}
-  root_uuid = 'toplevel'
+def getServiceInfo(self, status, modelb, req):
+	#set up struct for service config page
+	hmap = {}
+	root_uuid = 'toplevel'
 
-  hmap['root_uuid'] = root_uuid
-  hmap['uuid_list'] = map(lambda x: make_uuid('resource'), xrange(30))
+	try:
+		baseurl = req['URL']
+		if not baseurl:
+			raise KeyError, 'is blank'
+	except KeyError, e:
+		baseurl = '.'
 
-  try:
-      servicename = req['servicename']
-  except KeyError, e:
-      hmap['resource_list'] = {}
-      return hmap
+	try:
+		cluname = req['clustername']
+		if not cluname:
+			raise KeyError, 'is blank'
+	except KeyError, e:
+		try:
+			cluname = req.form['clusterName']
+			if not cluname:
+				raise
+		except:
+			cluname = '[error retrieving cluster name]'
 
-  for item in status:
-    if item['type'] == "service":
-      if item['name'] == servicename:
-        hmap['name'] = servicename
-        starturls = list()
-        if item['running'] == "true":
-          hmap['running'] = "true"
-          #In this case, determine where it can run...
-          innermap = {}
-          nodename = item['nodename']
-          innermap['current'] = "This service is currently running on %s" % nodename
-          innermap['disableurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_STOP
-          innermap['restarturl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_RESTART
-          nodes = modelb.getNodes()
-          for node in nodes:
-            starturl = {}
-            if node.getName() != nodename:
-              starturl['nodename'] = node.getName()
-              starturl['url'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START + "&nodename=" + node.getName()
-              starturls.append(starturl)
-          innermap['links'] = starturls
-        else:
-          #Do not set ['running'] in this case...ZPT will detect it is missing
-          #In this case, determine where it can run...
-          innermap = {}
-          innermap['current'] = "This service is currently stopped"
-          innermap['enableurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START
-          nodes = modelb.getNodes()
-          starturls = list()
-          for node in nodes:
-            starturl = {}
-            starturl['nodename'] = node.getName()
-            starturl['url'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START + "&nodename=" + node.getName()
-            starturls.append(starturl)
-          innermap['links'] = starturls
-        hmap['innermap'] = innermap
-
-  #Now build hashes for resources under service.
-  #first get service by name from model
-  svc = modelb.getService(servicename)
-  resource_list = list()
-  if svc != None:
-    indent_ctr = 0
-    children = svc.getChildren()
-    for child in children:
-      recurse_resources(root_uuid, child, resource_list, indent_ctr)
+	hmap['root_uuid'] = root_uuid
+	# uuids for the service page needed when new resources are created
+	hmap['uuid_list'] = map(lambda x: make_uuid('resource'), xrange(30))
+
+	try:
+		servicename = req['servicename']
+	except KeyError, e:
+		hmap['resource_list'] = {}
+		return hmap
+
+	for item in status:
+		if item['type'] == "service":
+			if item['name'] == servicename:
+				hmap['name'] = servicename
+				starturls = list()
+				if item['running'] == "true":
+					hmap['running'] = "true"
+					#In this case, determine where it can run...
+					innermap = {}
+					nodename = item['nodename']
+					innermap['current'] = "This service is currently running on %s" % nodename
+					innermap['disableurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_STOP
+					innermap['restarturl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_RESTART
+					nodes = modelb.getNodes()
+					for node in nodes:
+						starturl = {}
+						if node.getName() != nodename:
+							starturl['nodename'] = node.getName()
+							starturl['url'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START + "&nodename=" + node.getName()
+							starturls.append(starturl)
+					innermap['links'] = starturls
+				else:
+					#Do not set ['running'] in this case...ZPT will detect it is missing
+					#In this case, determine where it can run...
+					innermap = {}
+					innermap['current'] = "This service is currently stopped"
+					innermap['enableurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START
+					nodes = modelb.getNodes()
+					starturls = list()
+					for node in nodes:
+						starturl = {}
+						starturl['nodename'] = node.getName()
+						starturl['url'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START + "&nodename=" + node.getName()
+						starturls.append(starturl)
+					innermap['links'] = starturls
+				hmap['innermap'] = innermap
+
+	#Now build hashes for resources under service.
+	#first get service by name from model
+	svc = modelb.getService(servicename)
+	resource_list = list()
+	if svc != None:
+		indent_ctr = 0
+		children = svc.getChildren()
+		for child in children:
+			recurse_resources(root_uuid, child, resource_list, indent_ctr)
 
-  hmap['resource_list'] = resource_list
-  return hmap
+	hmap['resource_list'] = resource_list
+	return hmap
 
 def recurse_resources(parent_uuid, child, resource_list, indent_ctr, parent=None):
-  #First, add the incoming child as a resource
-  #Next, check for children of it
-  #Call yourself on every children
-  #then return
-  rc_map = {}
-  if parent != None:
-    rc_map['parent'] = parent
-  rc_map['name'] = child.getName()
-  if child.isRefObject() == True:
-    rc_map['ref_object'] = True
-    rc_map['type'] = child.getObj().getResourceType()
-  else:
-    rc_map['type'] = child.getResourceType()
-
-  rc_map['indent_ctr'] = indent_ctr
-
-  #Note: Final version needs all resource attrs
-  rc_map['attrs'] = child.getAttributes()
-  rc_map['uuid'] = make_uuid('resource')
-  rc_map['parent_uuid'] = parent_uuid
-
-  resource_list.append(rc_map)
-  kids = child.getChildren()
-  child_depth = 0
-  new_indent_ctr = indent_ctr + 1
-  for kid in kids:
-    cdepth = recurse_resources(rc_map['uuid'], kid, resource_list, new_indent_ctr, child)
-    child_depth = max(cdepth, child_depth)
+	#First, add the incoming child as a resource
+	#Next, check for children of it
+	#Call yourself on every children
+	#then return
+	rc_map = {}
+	if parent != None:
+		rc_map['parent'] = parent
+	rc_map['name'] = child.getName()
+	if child.isRefObject() == True:
+		rc_map['ref_object'] = True
+		rc_map['type'] = child.getObj().getResourceType()
+	else:
+		rc_map['type'] = child.getResourceType()
 
-  rc_map['max_depth'] = child_depth
-  return child_depth + 1
+	rc_map['indent_ctr'] = indent_ctr
 
-def serviceStart(self, ricci_agent, req):
-  rb = ricci_bridge(ricci_agent)
-  svcname = req['servicename']
-  try:
-    nodename = req['nodename']
-  except KeyError, e:
-    nodename = None
-  batch_number,result = rb.startService(svcname,nodename)
+	#Note: Final version needs all resource attrs
+	rc_map['attrs'] = child.getAttributes()
+	rc_map['uuid'] = make_uuid('resource')
+	rc_map['parent_uuid'] = parent_uuid
 
+	resource_list.append(rc_map)
+	kids = child.getChildren()
+	child_depth = 0
+	new_indent_ctr = indent_ctr + 1
+	for kid in kids:
+		cdepth = recurse_resources(rc_map['uuid'], kid, resource_list, new_indent_ctr, child)
+		child_depth = max(cdepth, child_depth)
 
-  #Now we need to create a DB flag for this system.
-  cluname = req['clustername']
+	rc_map['max_depth'] = child_depth
+	return child_depth + 1
 
-  path = CLUSTER_FOLDER_PATH + cluname
-  clusterfolder = self.restrictedTraverse(path)
-  batch_id = str(batch_number)
-  objname = ricci_agent + "____flag"
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-  #Now we need to annotate the new DB object
-  objpath = path + "/" + objname
-  flag = self.restrictedTraverse(objpath)
-  #flag[BATCH_ID] = batch_id
-  #flag[TASKTYPE] = SERVICE_START
-  #flag[FLAG_DESC] = "Starting service " + svcname
-  flag.manage_addProperty(BATCH_ID,batch_id, "string")
-  flag.manage_addProperty(TASKTYPE,SERVICE_START, "string")
-  flag.manage_addProperty(FLAG_DESC,"Starting service \'" + svcname + "\'", "string")
+def serviceStart(self, rc, req):
+	try:
+		svcname = req['servicename']
+	except KeyError, e:
+		try:
+			svcname = req.form['servicename']
+		except:
+			return None
 
-  response = req.RESPONSE
-  response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+	try:
+		nodename = req['nodename']
+	except KeyError, e:
+		try:
+			nodename = req.form['nodename']
+		except:
+			nodename = None
+	try:
+		cluname = req['clustername']
+	except KeyError, e:
+		try:
+			cluname = req.form['clusterName']
+		except:
+			return None
 
-def serviceRestart(self, ricci_agent, req):
-  rb = ricci_bridge(ricci_agent)
-  svcname = req['servicename']
-  batch_number, result = rb.restartService(svcname)
+	ricci_agent = rc.hostname()
+
+	batch_number, result = startService(rc, svcname, nodename)
+	#Now we need to create a DB flag for this system.
+
+	path = CLUSTER_FOLDER_PATH + cluname
+	clusterfolder = self.restrictedTraverse(path)
+	batch_id = str(batch_number)
+	objname = ricci_agent + "____flag"
+	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+	#Now we need to annotate the new DB object
+	objpath = path + "/" + objname
+	flag = self.restrictedTraverse(objpath)
+	#flag[BATCH_ID] = batch_id
+	#flag[TASKTYPE] = SERVICE_START
+	#flag[FLAG_DESC] = "Starting service " + svcname
+	flag.manage_addProperty(BATCH_ID,batch_id, "string")
+	flag.manage_addProperty(TASKTYPE,SERVICE_START, "string")
+	flag.manage_addProperty(FLAG_DESC,"Starting service \'" + svcname + "\'", "string")
+	response = req.RESPONSE
+	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
+def serviceRestart(self, rc, req):
+  svcname = req['servicename']
+  batch_number, result = restartService(rc, svcname)
 
+  ricci_agent = rc.hostname()
   #Now we need to create a DB flag for this system.
   cluname = req['clustername']
 
@@ -1466,14 +1558,15 @@
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
-def serviceStop(self, ricci_agent, req):
-  rb = ricci_bridge(ricci_agent)
+def serviceStop(self, rc, req):
   svcname = req['servicename']
-  batch_number, result = rb.stopService(svcname)
+  batch_number, result = stopService(rc, svcname)
 
   #Now we need to create a DB flag for this system.
   cluname = req['clustername']
 
+  ricci_agent = rc.hostname()
+
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
@@ -1787,175 +1880,241 @@
   return map
 
 def nodeTaskProcess(self, model, request):
-  clustername = request['clustername']
-  nodename = request['nodename']
-  task = request['task']
-  nodename_resolved = resolve_nodename(self, clustername, nodename)
-  if nodename_resolved == None:
-    return None
-
-  if task == NODE_LEAVE_CLUSTER:
-    rb = ricci_bridge(nodename_resolved)
-    batch_number, result = rb.nodeLeaveCluster()
-
-    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-    nodefolder = self.restrictedTraverse(path)
-    batch_id = str(batch_number)
-    objname = nodename_resolved + "____flag"
-    if noNodeFlagsPresent(self, nodefolder, objname, nodename_resolved) == False:
-      raise UnknownClusterError("Fatal", "An unfinished task flag exists for node %s" % nodename)
-
-    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-    #Now we need to annotate the new DB object
-    objpath = path + "/" + objname
-    flag = self.restrictedTraverse(objpath)
-    flag.manage_addProperty(BATCH_ID,batch_id, "string")
-    flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
-    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+	try:
+		clustername = request['clustername']
+	except KeyError, e:
+		try:
+			clustername = request.form['clusterName']
+		except:
+			return None
 
-    response = request.RESPONSE
-    #Is this correct? Should we re-direct to the cluster page?
-    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+	try:
+		nodename = request['nodename']
+	except KeyError, e:
+		try:
+			nodename = request.form['nodename']
+		except:
+			return None
 
-  elif task == NODE_JOIN_CLUSTER:
-    rb = ricci_bridge(nodename_resolved)
-    batch_number, result = rb.nodeJoinCluster()
-
-    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-    nodefolder = self.restrictedTraverse(path)
-    batch_id = str(batch_number)
-    objname = nodename_resolved + "____flag"
-    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-    #Now we need to annotate the new DB object
-    objpath = path + "/" + objname
-    flag = self.restrictedTraverse(objpath)
-    flag.manage_addProperty(BATCH_ID,batch_id, "string")
-    flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
-    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
+	try:
+		task = request['task']
+		if not task:
+			raise
+	except KeyError, e:
+		try:
+			task = request.form['task']
+		except:
+			return None
 
-    response = request.RESPONSE
-    #Once again, is this correct? Should we re-direct to the cluster page?
-    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+	nodename_resolved = resolve_nodename(self, clustername, nodename)
+	if not nodename_resolved or not nodename or not task or not clustername:
+		return None
 
+	if task != NODE_FENCE:
+		# Fencing is the only task for which we don't
+		# want to talk to the node on which the action is
+		# to be performed.
+		try:
+			rc = RicciCommunicator(nodename_resolved)
+			# XXX - check the cluster
+			if not rc.authed():
+				# set the flag
+				rc = None
 
-  elif task == NODE_REBOOT:
-    rb = ricci_bridge(nodename_resolved)
-    batch_number, result = rb.nodeReboot()
-
-    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-    nodefolder = self.restrictedTraverse(path)
-    batch_id = str(batch_number)
-    objname = nodename_resolved + "____flag"
-    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-    #Now we need to annotate the new DB object
-    objpath = path + "/" + objname
-    flag = self.restrictedTraverse(objpath)
-    flag.manage_addProperty(BATCH_ID,batch_id, "string")
-    flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
-    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
+			if not rc:
+				raise
+		except:
+			return None
 
-    response = request.RESPONSE
-    #Once again, is this correct? Should we re-direct to the cluster page?
-    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+	if task == NODE_LEAVE_CLUSTER:
+		batch_number, result = nodeLeaveCluster(rc)
+
+		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
+		nodefolder = self.restrictedTraverse(path)
+		batch_id = str(batch_number)
+		objname = nodename_resolved + "____flag"
+		if noNodeFlagsPresent(self, nodefolder, objname, nodename_resolved) == False:
+			raise UnknownClusterError("Fatal", "An unfinished task flag exists for node %s" % nodename)
+
+		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = path + "/" + objname
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID,batch_id, "string")
+		flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
+		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+
+		response = request.RESPONSE
+		#Is this correct? Should we re-direct to the cluster page?
+		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+	elif task == NODE_JOIN_CLUSTER:
+		batch_number, result = nodeJoinCluster(rc)
+		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
+		nodefolder = self.restrictedTraverse(path)
+		batch_id = str(batch_number)
+		objname = nodename_resolved + "____flag"
+		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = path + "/" + objname
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID,batch_id, "string")
+		flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
+		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
+
+		response = request.RESPONSE
+		#Once again, is this correct? Should we re-direct to the cluster page?
+		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+	elif task == NODE_REBOOT:
+		batch_number, result = nodeReboot(rc)
+		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
+		nodefolder = self.restrictedTraverse(path)
+		batch_id = str(batch_number)
+		objname = nodename_resolved + "____flag"
+		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = path + "/" + objname
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID,batch_id, "string")
+		flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
+		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
+
+		response = request.RESPONSE
+		#Once again, is this correct? Should we re-direct to the cluster page?
+		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+	elif task == NODE_FENCE:
+		#here, we DON'T want to open connection to node to be fenced.
+		path = CLUSTER_FOLDER_PATH + clustername
+		try:
+			clusterfolder = self.restrictedTraverse(path)
+			if not clusterfolder:
+				raise
+		except:
+			return None
 
+		nodes = clusterfolder.objectItems('Folder')
+		found_one = False
+		for node in nodes:
+			if node[1].getId().find(nodename) != (-1):
+				continue
 
-  elif task == NODE_FENCE:
-    #here, we DON'T want to open connection to node to be fenced.
-    path = CLUSTER_FOLDER_PATH + clustername
-    clusterfolder = self.restrictedTraverse(path)
-    if clusterfolder != None:
-      nodes = clusterfolder.objectItems('Folder')
-      found_one = False
-      for node in nodes:
-        if node[1].getID().find(nodename) != (-1):
-          continue
-        rb = ricci_bridge(node[1].getId())
-        if rb.getRicciResponse() == True:
-          found_one = True
-          break
-      if found_one == False:
-        return None
-    else:
-      return None
+			try:
+				rc = RicciCommunicator(node[1].getId())
+				if not rc.authed():
+					# set the node flag
+					rc = None
+				if not rc:
+					raise
+				found_one = True
+				break
+			except:
+				continue
+		if not found_one:
+			return None
 
-    batch_number, result = rb.nodeFence(nodename)
+		batch_number, result = nodeFence(rc, nodename)
+		path = path + "/" + nodename_resolved
+		nodefolder = self.restrictedTraverse(path)
+		batch_id = str(batch_number)
+		objname = nodename_resolved + "____flag"
+		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = path + "/" + objname
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID,batch_id, "string")
+		flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
+		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
+
+		response = request.RESPONSE
+		#Once again, is this correct? Should we re-direct to the cluster page?
+		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+	elif task == NODE_DELETE:
+		#We need to get a node name other than the node
+		#to be deleted, then delete the node from the cluster.conf
+		#and propogate it. We will need two ricci agents for this task.
 
-    path = path + "/" + nodename_resolved
-    nodefolder = self.restrictedTraverse(path)
-    batch_id = str(batch_number)
-    objname = nodename_resolved + "____flag"
-    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-    #Now we need to annotate the new DB object
-    objpath = path + "/" + objname
-    flag = self.restrictedTraverse(objpath)
-    flag.manage_addProperty(BATCH_ID,batch_id, "string")
-    flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
-    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
+		# Make sure we can find a second node before we hose anything.
+		path = CLUSTER_FOLDER_PATH + clustername
+		try:
+			clusterfolder = self.restrictedTraverse(path)
+			if not clusterfolder:
+				raise
+		except:
+			return None
 
-    response = request.RESPONSE
-    #Once again, is this correct? Should we re-direct to the cluster page?
-    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
+		nodes = clusterfolder.objectItems('Folder')
+		found_one = False
 
+		for node in nodes:
+			if node[1].getId().find(nodename) != (-1):
+				continue
+			#here we make certain the node is up...
+			# XXX- we should also make certain this host is still
+			# in the cluster we believe it is.
+			try:
+				rc2 = RicciCommunicator(node[1].getId())
+				if not rc2.authed():
+					# set the flag
+					rc2 = None
+				if not rc2:
+					raise
+				found_one = True
+				break
+			except:
+				continue
 
-  elif task == NODE_DELETE:
-    #We need to get a node name other than the node
-    #to be deleted, then delete the node from the cluster.conf
-    #and propogate it. We will need two ricci agents for this task.
-
-    #First, delete cluster.conf from node to be deleted.
-
-    #next, have node leave cluster.
-    rb = ricci_bridge(nodename_resolved)
-    batch_number, result = rb.nodeLeaveCluster()
-
-    #It is not worth flagging this node in DB, as we are going
-    #to delete it anyway. Now, we need to delete node from model
-    #and send out new cluster.conf
-
-    model.deleteNode(nodename)
-    str_buf = ""
-    model.exportModelAsString(str_buf)
-
-    #here, we DON'T want to open connection to node to be fenced.
-    path = CLUSTER_FOLDER_PATH + clustername
-    clusterfolder = self.restrictedTraverse(path)
-    if clusterfolder != None:
-      nodes = clusterfolder.objectItems('Folder')
-      found_one = False
-      for node in nodes:
-        if node[1].getID().find(nodename) != (-1):
-          continue
-        #here we make certain the node is up...
-        rbridge = ricci_bridge(node[1].getId())
-        if rbridge.getRicciResponse() == True:
-          found_one = True
-          break
-      if found_one == False:
-        return None
-    else:
-      return None
+		if not found_one:
+			return None
 
-    batch_number, result = rbridge.setClusterConf(str(str_buf))
+		#First, delete cluster.conf from node to be deleted.
+		#next, have node leave cluster.
+		batch_number, result = nodeLeaveCluster(rc)
+
+		#It is not worth flagging this node in DB, as we are going
+		#to delete it anyway. Now, we need to delete node from model
+		#and send out new cluster.conf
+		delete_target = None
+		try:
+			nodelist = model.getClusterNodesPtr().getChildren()
+			for n in nodelist:
+				if n.getName() == nodename:
+					delete_target = n
+					break
+		except:
+			return None
 
-    #Now we need to delete the node from the DB
-    path = CLUSTER_FOLDER_PATH + clustername
-    del_path = path + "/" + nodename_resolved
-    delnode = self.restrictedTraverse(del_path)
-    clusterfolder = self.restrictedTraverse(path)
-    clusterfolder.manage_delObjects(delnode[0])
+		if delete_target is None:
+			return None
 
-    batch_id = str(batch_number)
-    objname = nodename_resolved + "____flag"
-    clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-    #Now we need to annotate the new DB object
-    objpath = path + "/" + objname
-    flag = self.restrictedTraverse(objpath)
-    flag.manage_addProperty(BATCH_ID,batch_id, "string")
-    flag.manage_addProperty(TASKTYPE,NODE_DELETE, "string")
-    flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
-    response = request.RESPONSE
-    response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
+		model.deleteNode(delete_target)
+		str_buf = ""
+		model.exportModelAsString(str_buf)
+
+		# propagate the new cluster.conf via the second node
+		batch_number, result = setClusterConf(rc2, str(str_buf))
+
+		#Now we need to delete the node from the DB
+		path = str(CLUSTER_FOLDER_PATH + clustername)
+		del_path = str(path + "/" + nodename_resolved)
 
+		try:
+			delnode = self.restrictedTraverse(del_path)
+			clusterfolder = self.restrictedTraverse(path)
+			clusterfolder.manage_delObjects(delnode[0])
+		except:
+			# XXX - we need to handle this
+			pass
+
+		batch_id = str(batch_number)
+		objname = str(nodename_resolved + "____flag")
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = str(path + "/" + objname)
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID,batch_id, "string")
+		flag.manage_addProperty(TASKTYPE,NODE_DELETE, "string")
+		flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
+		response = request.RESPONSE
+		response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
 def getNodeInfo(self, model, status, request):
   infohash = {}
@@ -2030,13 +2189,13 @@
   infohash['d_states'] = None
   if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
   #call service module on node and find out which daemons are running
-    rb = ricci_bridge(nodename)
+    rc = RicciCommunicator(nodename)
     dlist = list()
     dlist.append("ccsd")
     dlist.append("cman")
     dlist.append("fenced")
     dlist.append("rgmanager")
-    states = rb.getDaemonStates(dlist)
+    states = getDaemonStates(rc, dlist)
     infohash['d_states'] = states
 
   infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername
@@ -2157,6 +2316,8 @@
       return map
 
     for i in xrange(2):
+      if not i in levels:
+        continue
       fence_struct = {}
       if levels[i] != None:
         level = levels[i]
@@ -2177,6 +2338,8 @@
         for kid in kids:
           name = kid.getName()
           found_fd = False
+          if not i in map:
+            continue
           for entry in map[i]:
             if entry['name'] == name:
               fence_struct = entry
@@ -2213,18 +2376,36 @@
 
     return map    
       
+def getLogsForNode(self, request):
+	try:
+		nodename = request['nodename']
+	except KeyError, e:
+		try:
+			nodename = request.form['nodename']
+		except:
+			return "Unable to retrieve logging information: no node name was given"
 
+	try:
+		clustername = request['clustername']
+	except KeyError, e:
+		try:
+			clustername = request.form['clusterName']
+		except:
+			return "Unable to resolve node name %s to retrieve logging information" % nodename
 
-def getLogsForNode(self, request):
-  nodename = request['nodename']
-  clustername = request['clustername']
-  try:
-    nodename_resolved = resolve_nodename(self, clustername, nodename)
-  except:
-    return "Unable to resolve node name %s to retrieve logging information" % nodename
+	try:
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+	except:
+		return "Unable to resolve node name %s to retrieve logging information" % nodename
 
-  rb = ricci_bridge(nodename_resolved)
-  return rb.getNodeLogs()
+	try:
+		rc = RicciCommunicator(nodename_resolved)
+		if not rc:
+			raise
+	except:
+		return "Unable to resolve node name %s to retrieve logging information" % nodename_resolved
+
+	return getNodeLogs(rc)
 
 def isClusterBusy(self, req):
   items = None
@@ -2233,14 +2414,34 @@
   redirect_message = False
   nodereports = list()
   map['nodereports'] = nodereports
-  cluname = req['clustername']
+
+  try:
+    cluname = req['clustername']
+  except KeyError, e:
+    try:
+      cluname = req.form['clustername']
+    except:
+      try:
+        cluname = req.form['clusterName']
+      except:
+        return map
+
   path = CLUSTER_FOLDER_PATH + cluname
-  clusterfolder = self.restrictedTraverse(path)
-  items = clusterfolder.objectItems('ManagedSystem')
-  if len(items) == 0:
-    return map  #This returns an empty map, and should indicate not busy
-  else:
-    map['busy'] = "true"
+  try:
+    clusterfolder = self.restrictedTraverse(str(path))
+    if not clusterfolder:
+      raise
+  except:
+    return map
+
+  try:
+    items = clusterfolder.objectItems('ManagedSystem')
+    if len(items) == 0:
+      return map  #This returns an empty map, and should indicate not busy
+  except:
+    return map
+    
+  map['busy'] = "true"
   #Ok, here is what is going on...if there is an item,
   #we need to call the ricci_bridge and get a batch report.
   #This report will tell us one of three things:
@@ -2352,8 +2553,8 @@
       node_report = {}
       node_report['isnodecreation'] = False
       ricci = item[0].split("____") #This removes the 'flag' suffix
-      rb = ricci_bridge(ricci[0])
-      finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
+      rc = RicciCommunicator(ricci[0])
+      finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
       if finished == True:
         node_report['desc'] = item[1].getProperty(FLAG_DESC) + REDIRECT_MSG
         nodereports.append(node_report)
@@ -2381,38 +2582,17 @@
     map['refreshurl'] = '5; url=\".\"'
   return map
 
-def getClusterOS(self, ragent, request):
-  try:
-    clustername = request['clustername']
-  except KeyError, e:
-    try:
-      clustername = request.form['clustername']
-    except:
-      return {}
-  except:
-    return {}
-
-  try:
-    ricci_agent = resolve_nodename(self, clustername, ragent)
-  except:
-    map = {}
-    map['os'] = ""
-    map['isVirtualized'] = False
-    return map
-  
-  try:
-    rc = RicciCommunicator(ricci_agent)
-  except:
-    map = {}
-    map['os'] = ""
-    map['isVirtualized'] = False
-    return map
-
-  map = {}
-  os_str = resolveOSType(rc.os())
-  map['os'] = os_str
-  map['isVirtualized'] = rc.dom0()
-  return map
+def getClusterOS(self, rc):
+	map = {}
+	try:
+		os_str = resolveOSType(rc.os())
+		map['os'] = os_str
+		map['isVirtualized'] = rc.dom0()
+	except:
+		# default to rhel5 if something crazy happened.
+		map['os'] = 'rhel5'
+		map['isVirtualized'] = False
+	return map
 
 def getResourcesInfo(modelb, request):
 	resList = list()
@@ -2477,41 +2657,73 @@
 			except:
 				return {}
 
-def delResource(self, request, ragent):
-  modelb = request.SESSION.get('model')
-  resPtr = modelb.getResourcesPtr()
-  resources = resPtr.getChildren()
-  name = request['resourcename']
-  for res in resources:
-    if res.getName() == name:
-      resPtr.removeChild(res)
-      break
+def delResource(self, rc, request):
+	errstr = 'An error occurred while attempting to set the cluster.conf'
 
-  modelstr = ""
-  conf = modelb.exportModelAsString()
-  rb = ricci_bridge(ragent)
-  #try:
-  if True:
-    batch_number, result = rb.setClusterConf(str(conf))
-  #except:
-  else:
-    return "Some error occured in setClusterConf\n"
+	try:
+		modelb = request.SESSION.get('model')
+	except:
+		return errstr
 
-  clustername = request['clustername']
-  path = CLUSTER_FOLDER_PATH + clustername
-  clusterfolder = self.restrictedTraverse(path)
-  batch_id = str(batch_number)
-  objname = ragent + "____flag"
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-   #Now we need to annotate the new DB object
-  objpath = path + "/" + objname
-  flag = self.restrictedTraverse(objpath)
-  flag.manage_addProperty(BATCH_ID,batch_id, "string")
-  flag.manage_addProperty(TASKTYPE,RESOURCE_REMOVE, "string")
-  flag.manage_addProperty(FLAG_DESC,"Removing Resource \'" + request['resourcename'] + "\'", "string")
+	try:
+		name = request['resourcename']
+	except KeyError, e:
+		return errstr + ': ' + str(e)
+
+	try:
+		clustername = request['clustername']
+	except KeyError, e:
+		try:
+			clustername = request.form['clustername']
+		except:
+			return errstr + ': could not determine the cluster name.'
+
+	try:
+		ragent = rc.hostname()
+		if not ragent:
+			raise
+	except:
+		return errstr
+
+	resPtr = modelb.getResourcesPtr()
+	resources = resPtr.getChildren()
+
+	found = 0
+	for res in resources:
+		if res.getName() == name:
+			resPtr.removeChild(res)
+			found = 1
+			break
+
+	if not found:
+		return errstr + ': the specified resource was not found.'
+
+	try:
+		conf = modelb.exportModelAsString()
+		if not conf:
+			raise
+	except:
+		return errstr
+
+	batch_number, result = setClusterConf(rc, str(conf))
+	if batch_number is None or result is None:
+		return errstr
+
+	modelstr = ""
+	path = CLUSTER_FOLDER_PATH + str(clustername)
+	clusterfolder = self.restrictedTraverse(path)
+	batch_id = str(batch_number)
+	objname = str(ragent) + '____flag'
+	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+	#Now we need to annotate the new DB object
+	objpath = str(path + '/' + objname)
+	flag = self.restrictedTraverse(objpath)
+	flag.manage_addProperty(BATCH_ID, batch_id, "string")
+	flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
+	flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
 
-  response = request.RESPONSE
-  response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
+	response = request.RESPONSE
+	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
 def addIp(request):
 	modelb = request.SESSION.get('model')
@@ -2973,16 +3185,24 @@
 	messages = list()
 	for i in missing_list:
 		cluster_node.delObjects([i])
+		## or alternately
+		#new_node = cluster_node.restrictedTraverse(i)
+		#setNodeFlag(self, new_node, CLUSTER_NODE_NOT_MEMBER)
 		messages.append('Node \"' + i + '\" is no longer in a member of cluster \"' + clusterName + '.\". It has been deleted from the management interface for this cluster.')
 
+	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
 	for i in new_list:
-		cluster_node.manage_addFolder(i, '__luci__:csystem:' + clusterName)
-		cluster_node.manage_addProperty('exceptions', 'auth', 'string')
-		messages.append('A new node, \"' + i + ',\" is now a member of cluster \"' + clusterName + '.\". It has added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.')
+		try:
+			cluster_node.manage_addFolder(i, '__luci__:csystem:' + clusterName)
+			new_node = cluster_node.restrictedTraverse(i)
+			setNodeFlag(self, new_node, new_flags)
+			messages.append('A new node, \"' + i + ',\" is now a member of cluster \"' + clusterName + '.\" It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.')
+		except:
+			messages.append('A new node, \"' + i + ',\" is now a member of cluster \"' + clusterName + ',\" but has not been added to the management interface for this cluster because of an error creating the database entry.')
 	
 	return messages
 
-def addResource(self, request, ragent):
+def addResource(self, rc, request):
 	if not request.form:
 		return (False, {'errors': ['No form was submitted.']})
 
@@ -3000,47 +3220,57 @@
 		if request.form['type'] != 'ip':
 			return (False, {'errors': ['No resource name was given.']})
 
+	try:
+		clustername = request['clustername']
+	except KeyError, e:
+		try:
+			clustername = request.form['clustername']
+		except:
+			return 'unable to determine the current cluster\'s name'
+
 	res = resourceAddHandler[type](request)
 	modelb = request.SESSION.get('model')
 	modelstr = ""
 	conf = modelb.exportModelAsString()
-	rb = ricci_bridge(ragent)
-	#try:
-	if True:
-		batch_number, result = rb.setClusterConf(str(conf))
-	#except:
-	else:
+
+	try:
+		ragent = rc.hostname()
+		if not ragent:
+			raise
+		batch_number, result = setClusterConf(rc, str(conf))
+		if batch_number is None or result is None:
+			raise
+	except:
 		return "Some error occured in setClusterConf\n"
 
-	clustername = request['clustername']
-	path = CLUSTER_FOLDER_PATH + clustername
+	path = str(CLUSTER_FOLDER_PATH + clustername)
 	clusterfolder = self.restrictedTraverse(path)
 	batch_id = str(batch_number)
-	objname = ragent + "____flag"
+	objname = str(ragent + '____flag')
 	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
 	#Now we need to annotate the new DB object
-	objpath = path + "/" + objname
+	objpath = str(path + '/' + objname)
 	flag = self.restrictedTraverse(objpath)
-	flag.manage_addProperty(BATCH_ID,batch_id, "string")
-	flag.manage_addProperty(TASKTYPE,RESOURCE_ADD, "string")
+	flag.manage_addProperty(BATCH_ID, batch_id, "string")
+	flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
+
 	if type != 'ip':
-		flag.manage_addProperty(FLAG_DESC,"Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
+		flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
 	else:
-		flag.manage_addProperty(FLAG_DESC,"Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+		flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+
 	response = request.RESPONSE
 	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
 def getResourceForEdit(modelb, name):
-  resPtr = modelb.getResourcesPtr()
-  resources = resPtr.getChildren()
-
-  for res in resources:
-    if res.getName() == name:
-      resPtr.removeChild(res)
-      break
-
-  return res
+	resPtr = modelb.getResourcesPtr()
+	resources = resPtr.getChildren()
 
+	for res in resources:
+		if res.getName() == name:
+			resPtr.removeChild(res)
+			return res
+	raise KeyError, name
 
 def appendModel(request, model):
 	try:
@@ -3049,26 +3279,35 @@
 		pass
 
 def resolve_nodename(self, clustername, nodename):
-  path = CLUSTER_FOLDER_PATH + clustername
-  clusterfolder = self.restrictedTraverse(path)
-  objs = clusterfolder.objectItems('Folder')
-  for obj in objs:
-    if obj[0].find(nodename) != (-1):
-      return obj[0]
-
-  raise
+	path = CLUSTER_FOLDER_PATH + clustername
+	clusterfolder = self.restrictedTraverse(path)
+	objs = clusterfolder.objectItems('Folder')
+	for obj in objs:
+		if obj[0].find(nodename) != (-1):
+			return obj[0]
+	raise
 
 def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
-  items = nodefolder.objectItems()
-  for item in items:
-    if item[0] == flagname:  #a flag already exists...
-      #try and delete it
-      rb = ricci_bridge(hostname)
-      finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
-      if finished == True:
-        nodefolder.manage_delObjects(item[0])
-        return True
-      else:
-        return False #Not finished, so cannot remove flag
+	items = nodefolder.objectItems('ManagedSystem')
 
-  return True
+	for item in items:
+		if item[0] != flagname:
+			continue
+
+		#a flag already exists... try to delete it
+		rc = RicciCommunicator(hostname)
+		finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
+		if finished == True:
+			try:
+				nodefolder.manage_delObjects(item[0])
+			except:
+				return False
+			return True
+		else:
+			#Not finished, so cannot remove flag
+			return False
+	return True
+
+def getModelBuilder(rc):
+	cluster_conf_node = getClusterConf(rc)
+	return ModelBuilder(0, None, None, cluster_conf_node)
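
For reference, the busy-flag lifecycle these cluster_adapters.py hunks implement -- create an "<agent>____flag" object annotated with BATCH_ID/TASKTYPE/FLAG_DESC, then reap it once the ricci batch finishes -- reduced to a standalone Python sketch. A dict stands in for the ZODB cluster folder and a callable for the ricci batch status; add_flag, no_flags_present and batch_is_finished are illustrative names, not Luci functions.

    def add_flag(folder, agent, batch_id, task, desc):
        # mirrors addManagedSystem() plus the manage_addProperty() calls above
        folder[agent + '____flag'] = {
            'BATCH_ID': str(batch_id),
            'TASKTYPE': task,
            'FLAG_DESC': desc,
        }

    def no_flags_present(folder, flagname, batch_is_finished):
        # mirrors noNodeFlagsPresent(): reap a finished flag, refuse while busy
        flag = folder.get(flagname)
        if flag is None:
            return True
        if batch_is_finished(flag['BATCH_ID']):
            del folder[flagname]
            return True
        return False

    folder = {}
    add_flag(folder, 'node1.example.com', 42, 'RESOURCE_ADD', "Creating New Resource 'foo'")
    print(no_flags_present(folder, 'node1.example.com____flag', lambda bid: False))  # False: batch still running
    print(no_flags_present(folder, 'node1.example.com____flag', lambda bid: True))   # True: flag reaped
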
--- conga/luci/site/luci/Extensions/Variable.py	2006/10/15 22:34:54	1.2
+++ conga/luci/site/luci/Extensions/Variable.py	2006/10/16 04:26:19	1.3
@@ -85,7 +85,6 @@
         self.__name = str(name)
         self.__mods = mods
         self.set_value(value)
-        return
     
     def get_name(self):
         return self.__name
--- conga/luci/site/luci/Extensions/PropsObject.py	2006/05/30 20:17:21	1.1
+++ conga/luci/site/luci/Extensions/PropsObject.py	2006/10/16 04:26:19	1.2
@@ -10,7 +10,6 @@
     
     def __init__(self):
         self.__vars = {}
-        return
     
     def add_prop(self, variable):
         self.__vars[variable.get_name()] = variable
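
The Variable.py and PropsObject.py hunks above only drop a trailing bare "return" from each constructor. That is purely cosmetic: a Python __init__ returns None implicitly, so the statement was a no-op. A trivial standalone illustration (PropsSketch is a made-up class, not the Luci one):

    class PropsSketch(object):
        def __init__(self):
            self.vars = {}      # no explicit 'return' needed here

    obj = PropsSketch()
    print(PropsSketch.__init__(obj) is None)   # True: __init__ yields None either way
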



^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-09-08 22:54 rmccabe
  0 siblings, 0 replies; 11+ messages in thread
From: rmccabe @ 2006-09-08 22:54 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-09-08 22:54:32

Modified files:
	luci/cluster   : form-macros index_html resource-form-macros 
	                 resource_form_handlers.js 
	luci/homebase  : homebase_common.js luci_homebase.css 
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	resource and service stuff

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.56&r2=1.57
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.11&r2=1.12
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&r1=1.7&r2=1.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource_form_handlers.js.diff?cvsroot=cluster&r1=1.4&r2=1.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/homebase_common.js.diff?cvsroot=cluster&r1=1.7&r2=1.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/luci_homebase.css.diff?cvsroot=cluster&r1=1.19&r2=1.20
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.62&r2=1.63

--- conga/luci/cluster/form-macros	2006/09/06 22:13:33	1.56
+++ conga/luci/cluster/form-macros	2006/09/08 22:54:32	1.57
@@ -1139,13 +1139,14 @@
 	<br/>
 	<h2>Service Composition</h2>
 
+	<div id="service_comp_list">
 	<div tal:repeat="res sinfo/resource_list"
 		tal:attributes="class python: 'service_comp rc_indent' + str(res['indent_ctr'])">
 
 
 		<tal:block
 			tal:condition="python: res['max_depth'] > 0"
-			tal:replace="structure string:<div>" />
+			tal:replace="structure python: '<div>'" />
 
 
 		<tal:block tal:define="
@@ -1187,8 +1188,8 @@
 		<div
 			tal:condition="python: res['max_depth'] != 0"
 			tal:attributes="
-				class python: 'service_tree rc_indent' + str(res['indent_ctr'] - 1)">
-			<img class="service_tree" name="arrow_down"
+				class python: 'rc_indent' + str(res['indent_ctr'] - 1)">
+			<img class="service_tree" name="arrow_down" alt="[-]"
 				src="/luci/cluster/arrow_down.png"
 				onClick="collapse_div(this)">
 			<span class="service_tree">Hide Children</span>
@@ -1197,6 +1198,7 @@
 		<tal:block
 			tal:replace="structure python: '</div>' * (res['indent_ctr'] - res['max_depth'])" />
 	</div>
+	</div>
 </div>
 
 <div metal:define-macro="service-form">
--- conga/luci/cluster/index_html	2006/08/28 23:04:59	1.11
+++ conga/luci/cluster/index_html	2006/09/08 22:54:32	1.12
@@ -29,6 +29,7 @@
       </metal:cache>
 
       <metal:headslot define-slot="head_slot" />
+		<tal:block tal:define="global sinfo nothing" />
     <div tal:define="global hascluster request/clustername |nothing; global busywaiting python:None;"/>
     <span tal:condition="not: hascluster">
     <meta googaa="ooo"/>
--- conga/luci/cluster/resource-form-macros	2006/09/06 22:13:33	1.7
+++ conga/luci/cluster/resource-form-macros	2006/09/08 22:54:32	1.8
@@ -70,22 +70,25 @@
 		src="/luci/cluster/resource_form_handlers.js">
 	</script>
 
+	<tal:block
+		tal:define="
+			global res python: here.getResourceInfo(modelb, request);" />
+
 	<h2>Add a Resource</h2>
-	<strong>Select Resource Type</strong>
+	<strong>Select a Resource Type</strong>
 	<br/>
 
 	<form>
 	<select name="select_div"
 		onChange="swap_div('container', this.form.select_div.options[this.form.select_div.selectedIndex].value);">
-		<option name="blank" value="blank">Select a resource type</option>
-		<option name="IP" value="IP">IP Resource</option>
-		<option name="FS" value="FS">FS Resource</option>
-		<option name="GFS" value="GFS">GFS Resource</option>
-		<option name="NFSM" value="NFSM">NFS Mount Resource</option>
-		<option name="NFSC" value="NFSC">NFS Client Resource</option>
-		<option name="NFSX" value="NFSX">NFS Export Resource</option>
-		<option name="SCR" value="SCR">Script Resource</option>
-		<option name="SMB" value="SMB">Samba Resource</option>
+		<option name="IP" value="IP">IP address</option>
+		<option name="FS" value="FS">File system</option>
+		<option name="GFS" value="GFS">GFS file system</option>
+		<option name="NFSM" value="NFSM">NFS mount</option>
+		<option name="NFSC" value="NFSC">NFS client</option>
+		<option name="NFSX" value="NFSX">NFS export</option>
+		<option name="SCR" value="SCR">Script</option>
+		<option name="SMB" value="SMB">Samba</option>
 	</select>
 	</form>
 
@@ -150,19 +153,19 @@
 
 <div metal:define-macro="resourceconfig-form">
 	<script type="text/javascript"
-		src="luci/cluster/resource_form_handlers.js">
+		src="/luci/cluster/resource_form_handlers.js">
 	</script>
 
 	<tal:block tal:define="
-		global msg python: here.appendModel(request, modelb);
 		global restoedit request/resourcename | nothing" />
 
-	<tal:block tal:condition="restoedit">
+	<tal:block tal:condition="restoedit"
 		tal:define="
+			global msg python: here.appendModel(request, modelb);
 			global res python: here.getResourceInfo(modelb, request);
-			global type res/tag_name">
+			global type python: res['tag_name']">
 
-		<h2>Configure <span tal:replace="res/name"/></h2>
+		<h2>Configure <span tal:replace="python: res['name']" /></h2>
 
 		<span tal:omit-tag="" tal:condition="python: type == 'ip'">
 			<div metal:use-macro="here/resource-form-macros/macros/ip_macro"/>
@@ -198,17 +201,12 @@
 	</tal:block>
 </div>
 
-<div id="IP" metal:define-macro="ip_macro">
-
+<div class="rescfg" id="IP" metal:define-macro="ip_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype == '24'"
 	tal:define="
 		global edit python: True;
-		global resName res/name;
-		global address python: resName.split('.');
-		global ip1 python: address[0];
-		global ip2 python: address[1];
-		global ip3 python: address[2];
-		global ip4 python: address[3];
+		global resName res/name | nothing;
+		global ip_address python: resName;
 		global monitor_link res/attrs/monitor_link | nothing;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
@@ -223,17 +221,20 @@
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
 
-	<strong class="reshdr">IP Address Resource Configuration</strong>
+	<div class="reshdr">IP Address Resource Configuration</div>
 
 	<form name="ip_form" id="ip_form" method="get"
 		tal:attributes="action processURL">
-
 	<input name="pagetype" type="hidden" value="35"/>
 
-	<input name="clustername" type="hidden" tal:attributes="value cluname"/>
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
+	<input name="clustername" type="hidden"
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
-		tal:attributes="value res/name"
+		tal:attributes="value res/name | nothing"
 		tal:condition="python: edit == True" />
 
 	<input name="edit" type="hidden" value="1"
@@ -243,20 +244,14 @@
 
 	<table id="ipResourceTable" class="systemsTable">
 		<tr class="systemsTable">
+			<td class="systemsTable">IP address</td>
 			<td class="systemsTable">
-				<strong>IP</strong>
-			</td>
-			<td class="systemsTable">
-				<input size="3" name="ip1" type="text" maxlength="3" tal:attributes="value ip1 | nothing"/>.
-				<input size="3" name="ip2" type="text" maxlength="3" tal:attributes="value ip2 | nothing"/>.
-				<input size="3" name="ip3" type="text" maxlength="3" tal:attributes="value ip3 | nothing"/>.
-				<input size="3" name="ip4" type="text" maxlength="3" tal:attributes="value ip4 | nothing"/>
+				<input size="15" name="ip_address" type="text" maxlength="15"
+					tal:attributes="value ip_address | nothing" />
 			</td>
 		</tr>
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Monitor Link</strong>
-			</td>
+			<td class="systemsTable">Monitor link</td>
 			<td class="systemsTable">
 				<input type="checkbox" name="monitorLink"
 					tal:attributes="
@@ -266,30 +261,34 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
 </div>
 
-<div id="FS" metal:define-macro="fs_macro">
+<div class="rescfg" id="FS" metal:define-macro="fs_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype == '24'"
 	tal:define="
 		global edit python: True;
-		global resName res/name;
-		global mountPoint res/attrs/mountpoint;
-		global device res/attrs/device;
-		global fstype res/attrs/fstype;
-		global opt res/attrs/options;
-		global fsid res/attrs/fsid;
-		global force_unmount res/attrs/force_unmount;
-		global reboot_fail res/attrs/self_fence;
-		global fscheck res/attrs/force_fsck;
+		global resName res/name | nothing;
+		global mountPoint res/attrs/mountpoint | nothing;
+		global device res/attrs/device | nothing;
+		global fstype res/attrs/fstype | nothing;
+		global opt res/attrs/options | nothing;
+		global fsid res/attrs/fsid | nothing;
+		global force_unmount res/attrs/force_unmount | nothing;
+		global reboot_fail res/attrs/self_fence | nothing;
+		global fscheck res/attrs/force_fsck | nothing;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname+'&edit=1'" />
@@ -305,17 +304,20 @@
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
 
-	<strong class="reshdr">File System Resource Configuration</strong>
+	<div class="reshdr">File System Resource Configuration</div>
 
 	<form name="fs_form" method="get" tal:attributes="action processURL">
-
 	<input name="pagetype" type="hidden" value="35" />
 
-	<input name="clustername" type="hidden" tal:attributes="value cluname" />
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
+	<input name="clustername" type="hidden"
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
 		tal:condition="python: edit == True"
-		tal:attributes="value res/name" />
+		tal:attributes="value res/name | nothing" />
 
 	<input name="edit" type="hidden" value="1"
 		tal:condition="python: edit == True" />
@@ -324,9 +326,7 @@
 
 	<table id="fileSystemTable" class="systemsTable">
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Resource Name</strong>
-			</td>
+			<td class="systemsTable">Name</td>
 			<td class="systemsTable">
 				<input type="text" size="20"
 					id="resourceName" name="resourceName"
@@ -335,9 +335,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>File System Type</strong>
-			</td>
+			<td class="systemsTable">File system type</td>
 			<td class="systemsTable">
 				<select id="fstype" name="fstype">
 					<option name="ext3" value="ext3"
@@ -351,29 +349,23 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Mount Point</strong>
-			</td>
+			<td class="systemsTable">Mount point</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="mountpoint" name="mountpoint"
-					tal:attributes="value mountPoint | nothing"/>
+					tal:attributes="value mountPoint | nothing" />
 			</td>
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Device</strong>
-			</td>
+			<td class="systemsTable">Device</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="device" name="device"
-					tal:attributes="value device | nothing"/>
+					tal:attributes="value device | nothing" />
 			</td>
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Options</strong>
-			</td>
+			<td class="systemsTable">Options</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="options" name="options"
 					tal:attributes="value opt | nothing" />
@@ -381,9 +373,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>File System ID</strong>
-			</td>
+			<td class="systemsTable">File system ID</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="fsid" name="fsid"
 					tal:attributes="value fsid | nothing" />
@@ -391,9 +381,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Force unmount</strong>
-			</td>
+			<td class="systemsTable">Force unmount</td>
 			<td class="systemsTable">
 				<input type="checkbox" id="forceunmount" name="forceunmount"
 					tal:attributes="checked python: force_unmount == '1' and 'checked'" />
@@ -401,9 +389,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Reboot host node if unmount fails</strong>
-			</td>
+			<td class="systemsTable">Reboot host node if unmount fails</td>
 			<td class="systemsTable">
 				<input type="checkbox" id="selffence" name="selffence"
 					tal:attributes="
@@ -412,9 +398,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Check file system before mounting</strong>
-			</td>
+			<td class="systemsTable">Check file system before mounting</td>
 			<td class="systemsTable">
 				<input type="checkbox" id="checkfs" name="checkfs"
 					tal:attributes="
@@ -424,29 +408,33 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
 </div>
 
-<div id="GFS" metal:define-macro="gfs_macro">
+<div class="rescfg" id="GFS" metal:define-macro="gfs_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype =='24'"
 	tal:define="
 		global edit python: True;
-		global resName res/name;
-		global mountPoint res/attrs/mountpoint;
-		global device res/attrs/device;
-		global fstype res/attrs/fstype;
-		global opt res/attrs/options;
-		global fsid res/attrs/fsid;
-		global force_unmount res/attrs/force_unmount;
-		global cluname request/clustername;
+		global resName res/name | nothing;
+		global mountPoint res/attrs/mountpoint | nothing;
+		global device res/attrs/device | nothing;
+		global fstype res/attrs/fstype | nothing;
+		global opt res/attrs/options | nothing;
+		global fsid res/attrs/fsid | nothing;
+		global force_unmount res/attrs/force_unmount | nothing;
+		global cluname request/clustername | nothing;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname+'&edit=1'" />
 
@@ -458,17 +446,20 @@
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
 
-	<strong class="reshdr">GFS Resource Configuration</strong>
-	<form name="gfs_form" method="get" tal:attributes="action processURL">
+	<div class="reshdr">GFS Resource Configuration</div>
 
+	<form name="gfs_form" method="get" tal:attributes="action processURL">
 	<input name="pagetype" type="hidden" value="35" />
 
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
 	<input name="clustername" type="hidden"
-		tal:attributes="value cluname" />
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
 		tal:condition="python: edit == True"
-		tal:attributes="value res/name" />
+		tal:attributes="value res/name | nothing" />
 
 	<input name="edit" type="hidden" value="1"
 		tal:condition="python: edit == True" />
@@ -477,9 +468,7 @@
 
 	<table id="gfsTable" class="systemsTable">
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Resource Name</strong>
-			</td>
+			<td class="systemsTable">Name</td>
 			<td class="systemsTable">
 				<input type="text" size="20"
 					id="resourceName" name="resourceName"
@@ -488,10 +477,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Mount Point</strong>
-			</td>
-
+			<td class="systemsTable">Mount point</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="mountPoint" name="mountpoint"
 					tal:attributes="value mountpoint | nothing" />
@@ -499,9 +485,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Device</strong>
-			</td>
+			<td class="systemsTable">Device</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="device" name="device"
 					tal:attributes="value device | nothing"/>
@@ -509,9 +493,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Options</strong>
-			</td>
+			<td class="systemsTable">Options</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="options" name="options"
 					tal:attributes="value opt | nothing" />
@@ -519,9 +501,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>File System ID</strong>
-			</td>
+			<td class="systemsTable">File system ID</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="fsid" name="fsid"
 					tal:attributes="value fsid | nothing" />
@@ -529,9 +509,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Force unmount</strong>
-			</td>
+			<td class="systemsTable">Force unmount</td>
 			<td class="systemsTable">
 				<input type="checkbox" id="forceunmount" name="forceunmount"
 					tal:attributes="
@@ -541,28 +519,32 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
 </div>
 
-<div id="NFSM" metal:define-macro="nfsm_macro">
+<div class="rescfg" id="NFSM" metal:define-macro="nfsm_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype =='24'"
 	tal:define="
 		global edit python: True;
-		global resName res/name;
-		global mountpoint res/attrs/mountpoint;
-		global opt res/attrs/options;
-		global expath res/attrs/export;
-		global nfstype res/attrs/fstype;
-		global hostname res/attrs/host;
-		global force_unmount res/attrs/force_unmount;
+		global resName res/name | nothing;
+		global mountpoint res/attrs/mountpoint | nothing;
+		global opt res/attrs/options | nothing;
+		global expath res/attrs/export | nothing;
+		global nfstype res/attrs/fstype | nothing;
+		global hostname res/attrs/host | nothing;
+		global force_unmount res/attrs/force_unmount | nothing;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname+'&edit=1'" />
@@ -574,17 +556,21 @@
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
-	<form name="nfsm_form" method="get" tal:attributes="action processURL">
 
-	<strong class="reshdr">NFS Mount Resource Configuration</strong>
+	<div class="reshdr">NFS Mount Resource Configuration</div>
 
+	<form name="nfsm_form" method="get" tal:attributes="action processURL">
 	<input name="pagetype" type="hidden" value="35" />
 
-	<input name="clustername" type="hidden" tal:attributes="value cluname"/>
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
+	<input name="clustername" type="hidden"
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
 		tal:condition="python: edit == True"
-		tal:attributes="value res/name" />
+		tal:attributes="value res/name | nothing" />
 
 	<input name="edit" type="hidden" value="1"
 		tal:condition="python: edit == True" />
@@ -593,9 +579,7 @@
 
 	<table id="nfsMountTable" class="systemsTable">
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Resource Name</strong>
-			</td>
+			<td class="systemsTable">Name</td>
 			<td class="systemsTable">
 				<input type="text" size="20"
 					id="resourceName" name="resourceName"
@@ -604,9 +588,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Mount Point</strong>
-			</td>
+			<td class="systemsTable">Mount point</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="mountpoint" name="mountpoint"
 					tal:attributes="value mountpoint | nothing" />
@@ -614,9 +596,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Host</strong>
-			</td>
+			<td class="systemsTable">Host</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="host" name="host"
 					tal:attributes="value hostname | nothing" />
@@ -624,19 +604,15 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Export Path</strong>
-			</td>
+			<td class="systemsTable">Export path</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="exportpath" name="exportpath"
-					tal:attributes="value expath | nothing"/>
+					tal:attributes="value expath | nothing" />
 			</td>
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>NFS Type</strong>
-			</td>
+			<td class="systemsTable">NFS version</td>
 			<td class="systemsTable">
 				<input type="radio" name="nfstype" value="nfs"
 					tal:attributes="checked python: (edit == nothing or nfstype == 'nfs') and 'checked'" />
@@ -649,19 +625,15 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Options</strong>
-			</td>
+			<td class="systemsTable">Options</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="options" name="options"
-					tal:attributes="value opt | nothing"/>
+					tal:attributes="value opt | nothing" />
 			</td>
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Force unmount</strong>
-			</td>
+			<td class="systemsTable">Force unmount</td>
 			<td class="systemsTable">
 				<input type="checkbox" id="forceunmount" name="forceunmount"
 					tal:attributes="
@@ -671,24 +643,28 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
 </div>
 
-<div id="NFSC" metal:define-macro="nfsc_macro">
+<div class="rescfg" id="NFSC" metal:define-macro="nfsc_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype == '24'"
 	tal:define="
 		global edit python: True;
-		global resName res/name;
-		global target res/attrs/target;
-		global opt res/attrs/options;
+		global resName res/name | nothing;
+		global target res/attrs/target | nothing;
+		global opt res/attrs/options | nothing;
 		global expath res/attrs/readOnly | nothing;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
@@ -701,17 +677,20 @@
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
 
-	<strong class="reshdr">NFS Client Resource Configuration</strong>
+	<div class="reshdr">NFS Client Resource Configuration</div>
 
 	<form name="nfsc_form" method="post" tal:attributes="action processURL">
-
 	<input name="pagetype" type="hidden" value="35" />
 
-	<input name="clustername" type="hidden" tal:attributes="value cluname" />
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
+	<input name="clustername" type="hidden"
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
 		tal:condition="python: edit == True"
-		tal:attributes="value res/name" />
+		tal:attributes="value res/name | nothing" />
 
 	<input name="edit" type="hidden" value="1"
 		tal:condition="python: edit == True" />
@@ -720,9 +699,7 @@
 
 	<table id="nfsClientTable" class="systemsTable">
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Resource Name</strong>
-			</td>
+			<td class="systemsTable">Name</td>
 			<td class="systemsTable">
 				<input type="text" size="20"
 					id="resourceName" name="resourceName"
@@ -731,9 +708,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Target</strong>
-			</td>
+			<td class="systemsTable">Target</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="target" name="target"
 					tal:attributes="value target | nothing"/>
@@ -741,9 +716,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Options</strong>
-			</td>
+			<td class="systemsTable">Options</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="options" name="options"
 					tal:attributes="value opt | nothing"/>
@@ -752,22 +725,26 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
 </div>
 
-<div id="NFSX" metal:define-macro="nfsx_macro">
+<div class="rescfg" id="NFSX" metal:define-macro="nfsx_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype == '24'"
 	tal:define="
 		global edit python: True;
-		global resName res/name;
+		global resName res/name | nothing;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname+'&edit=1'" />
@@ -778,16 +755,20 @@
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
 
-	<strong class="reshdr">NFS Export Resource Configuration</strong>
-	<form name="nfsx_form" method="post" tal:attributes="action processURL">
+	<div class="reshdr">NFS Export Resource Configuration</div>
 
+	<form name="nfsx_form" method="post" tal:attributes="action processURL">
 	<input name="pagetype" type="hidden" value="35" />
 
-	<input name="clustername" type="hidden" tal:attributes="value cluname" />
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
+	<input name="clustername" type="hidden"
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
 		tal:condition="python: edit == True"
-		tal:attributes="value res/name" />
+		tal:attributes="value res/name | nothing" />
 
 	<input name="edit" type="hidden" value="1"
 		tal:condition="python: edit == True" />
@@ -796,9 +777,7 @@
 
 	<table id="nfsExportTable" class="systemsTable">
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Name</strong>
-			</td>
+			<td class="systemsTable">Name</td>
 			<td class="systemsTable">
 				<input type="text" size="20"
 					id="resourceName" name="resourceName"
@@ -808,23 +787,27 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
 </div>
 
-<div id="SCR" metal:define-macro="scr_macro">
+<div class="rescfg" id="SCR" metal:define-macro="scr_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype == '24'"
 	tal:define="
 		global edit string:true;
-		global resName res/name;
-		global filename res/attrs/file;
+		global resName res/name | nothing;
+		global filename res/attrs/file | nothing;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname+'&edit=1'" />
@@ -835,16 +818,20 @@
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
 
-	<strong class="reshdr">Script Resource Configuration</strong>
-	<form name="scr_form" method="post" tal:attributes="action processURL">
+	<div class="reshdr">Script Resource Configuration</div>
 
+	<form name="scr_form" method="post" tal:attributes="action processURL">
 	<input name="pagetype" type="hidden" value="35" />
 
-	<input name="clustername" type="hidden" tal:attributes="value cluname" />
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
+	<input name="clustername" type="hidden"
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
 		tal:condition="python: edit == 'true'"
-		tal:attributes="value res/name" />
+		tal:attributes="value res/name | nothing" />
 
 	<input name="edit" type="hidden" value="1"
 		tal:condition="python: edit == 'true'" />
@@ -853,9 +840,7 @@
 
 	<table id="scriptTable" class="systemsTable">
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Name</strong>
-			</td>
+			<td class="systemsTable">Name</td>
 			<td class="systemsTable">
 				<input type="text" size="20"
 					id="resourceName" name="resourceName"
@@ -864,9 +849,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Full path to script file</strong>
-			</td>
+			<td class="systemsTable">Full path to script file</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="file" name="file"
 					tal:attributes="value filename | nothing" />
@@ -875,45 +858,55 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
 </div>
 
-<div id="SMB" metal:define-macro="smb_macro">
+<div class="rescfg" id="SMB" metal:define-macro="smb_macro">
 <tal:block tal:condition="python: ptype == '33' or ptype == '24'"
 	tal:define="
 		global edit python: True;
-		global resName res/name;
-		global workgroup res/attrs/workgroup;
+		global resName res/name | nothing;
+		global workgroup res/attrs/workgroup | nothing;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname+'&edit=1'" />
 
 <tal:block tal:condition="python: ptype != '33'"
 	tal:define="
-		global edit python: false;
+		global edit python: False;
 		global cluname request/clustername;
 		tmp_URL context/cluster/index_html/absolute_url;
 		global processURL python: tmp_URL+'?pagetype=35&clustername='+cluname" />
 
-	<strong class="reshdr" tal:condition="python: edit != 'true'">Samba Server Configuration</strong>
+	<div class="reshdr"
+		tal:condition="python: edit != 'true'">
+		Samba Server Configuration
+	</div>
 
 	<form name="smb_form" method="post" tal:attributes="action processURL">
-
 	<input name="pagetype" type="hidden" value="35" />
 
-	<input name="clustername" type="hidden" tal:attributes="value cluname" />
+	<input name="tree_level" type="hidden"
+		tal:attributes="value res/indent_ctr | string:0" />
+
+	<input name="clustername" type="hidden"
+		tal:attributes="value cluname | nothing" />
 
 	<input name="oldname" type="hidden"
 		tal:condition="python: edit == True"
-		tal:attributes="value res/name" />
+		tal:attributes="value res/name | nothing" />
 
 	<input name="edit" type="hidden" value="1"
 		tal:condition="python: edit == True" />
@@ -922,9 +915,7 @@
 
 	<table id="sambaTable" class="systemsTable">
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Name</strong>
-			</td>
+			<td class="systemsTable">Name</td>
 			<td class="systemsTable">
 				<input type="text" size="20"
 					id="resourceName" name="resourceName"
@@ -933,9 +924,7 @@
 		</tr>
 
 		<tr class="systemsTable">
-			<td class="systemsTable">
-				<strong>Workgroup</strong>
-			</td>
+			<td class="systemsTable">Workgroup</td>
 			<td class="systemsTable">
 				<input type="text" size="20" id="workgroup" name="workgroup"
 					tal:attributes="value workgroup | nothing"/>
@@ -944,12 +933,16 @@
 	</table>
 
 	<div class="hbSubmit">
-		<input class="hbSubmit" type="button" value="Submit"
-			onClick="validate(this.form);" />
-		<input class="hbSubmit" type="button" value="Add a child resource"
-			onClick="add_child_resource(this.form)" />
-		<input class="hbSubmit" value="Delete this resource"
-			onClick="delete_resource(this.form)" />
+		<input class="hbSubmit" type="button"
+			onClick="validate_form(this.form);"
+			tal:attributes="value python: sinfo and 'Apply' or 'Submit'" />
+
+		<tal:block tal:condition="sinfo">
+			<input class="hbSubmit" type="button" value="Add a child"
+				onClick="add_child_resource(this.form)" />
+			<input class="hbSubmit" type="button" value="Delete this resource"
+				onClick="delete_resource(this.form)" />
+		</tal:block>
 	</div>
 
 	</form>
--- conga/luci/cluster/resource_form_handlers.js	2006/09/06 22:13:33	1.4
+++ conga/luci/cluster/resource_form_handlers.js	2006/09/08 22:54:32	1.5
@@ -18,11 +18,14 @@
 
 function validate_ip(form) {
 	var errors = new Array();
-	var ipstr =	form.ip1.value + '.' + form.ip2.value + '.' +
-				form.ip3.value + '.' + form.ip4.value;
 
+	if (!form.ip_address || str_is_blank(form.ip_address.value))
+		errors.push('You did not give an IP address.');
+	return (errors);
+
+	var ipstr =	form.ip_address.value;
 	if (!isValidHost(ipstr))
-		errors.push('\"' + ipstr '\" is an invalid IP address.');
+		errors.push('\"' + ipstr + '\" is an invalid IP address.');
 	return (errors);
 }
 
@@ -71,7 +74,7 @@
 }
 
 function validate_gfs(form) {
-	my errors = new Array();
+	var errors = new Array();
 
 	if (!form.mountpoint || str_is_blank(form.mountpoint.value))
 		errors.push('No file system mount point was given.');
@@ -110,20 +113,33 @@
 form_validators['scr'] = validate_script;
 form_validators['smb'] = validate_samba;
 
-function validate(form) {
+function validate_form(form) {
 	var valfn = form_validators[form.type.value];
 	if (!valfn)
 		return (-1);
 
-	if (error_dialog(valfn(form)))
+	var errors = valfn(form);
+	if (form.type.value != 'ip') {
+		if (!form.resourceName || str_is_blank(form.resourceName.value))
+			errors.push('No resource name was given.')
+	}
+
+	if (error_dialog(errors))
 		return (-1);
 	form.submit();
 }
 
 function delete_resource(form) {
-	return (-1);
+	if (!confirm('Are you sure you want to delete this resource?'))
+		return (-1);
+	form.submit();
 }
 
 function add_child_resource(form) {
-	return (-1);
+	if (!form || !form.tree_level ||
+		!is_valid_int(form.tree_level.value, 0, null))
+	{
+		return (-1);
+	}
+	var level = Number(form.tree_level.value);
 }
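
The reworked validate_form() above dispatches on form.type through the form_validators table, appends a resource-name check for every type except 'ip', and only submits when the error list is empty. The same dispatch-table idea, sketched in Python with a plain dict standing in for the submitted form; the validators and messages here are illustrative, not the Luci code.

    def validate_ip(form):
        return [] if form.get('ip_address', '').strip() else ['You did not give an IP address.']

    def validate_fs(form):
        errors = []
        if not form.get('mountpoint', '').strip():
            errors.append('No file system mount point was given.')
        if not form.get('device', '').strip():
            errors.append('No device was given.')
        return errors

    form_validators = {'ip': validate_ip, 'fs': validate_fs}

    def validate_form(form):
        valfn = form_validators.get(form.get('type'))
        if valfn is None:
            return ['Unknown resource type.']
        errors = valfn(form)
        if form.get('type') != 'ip' and not form.get('resourceName', '').strip():
            errors.append('No resource name was given.')
        return errors

    print(validate_form({'type': 'fs', 'resourceName': 'myfs',
                         'mountpoint': '/mnt', 'device': '/dev/sda1'}))   # []
    print(validate_form({'type': 'ip'}))   # ['You did not give an IP address.']
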
--- conga/luci/homebase/homebase_common.js	2006/09/05 21:25:45	1.7
+++ conga/luci/homebase/homebase_common.js	2006/09/08 22:54:32	1.8
@@ -8,14 +8,22 @@
 	else
 		span = null;
 
-	div = image.parentNode.parentNode.parentNode.getElementsByTagName('div');
-	if (!div || div.length < 1)
+	var div = null;
+	divc = image.parentNode.parentNode.parentNode.getElementsByTagName('div');
+	for (var i = 0 ; i < divc.length ; i++) {
+		if (divc[i].className.match(/service_comp/)) {
+			div = divc[i];
+			break;
+		}
+	}
+
+	if (!div)
 		return (-1);
-	div = div[3];
 
 	if (image.name == 'arrow_down') {
 		image.src = 'arrow_right.png';
 		image.name = 'arrow_right';
+		image.alt = '[-]';
 		if (span)
 			span.innerHTML = 'Show Children';
 		div.style.visibility = 'hidden';
@@ -23,6 +31,7 @@
 	} else {
 		image.src = 'arrow_down.png';
 		image.name = 'arrow_down';
+		image.alt = '[+]';
 		if (span)
 			span.innerHTML = 'Hide Children';
 		div.style.visibility = 'inherit';
--- conga/luci/homebase/luci_homebase.css	2006/09/06 22:14:42	1.19
+++ conga/luci/homebase/luci_homebase.css	2006/09/08 22:54:32	1.20
@@ -259,14 +259,23 @@
 strong.cluster {
 	text-align: top;
 	font-size: 9pt;
-	letter-spacing: 120%;
+	letter-spacing: +1px;
 }
 
+*.reshdr {
+	text-align: top;
+	font-size: 9pt;
+	letter-spacing: +1px;
+	font-weight: 600;
+	padding-bottom: +1em;
+}
+
+
 strong.service_name,
 strong.node_name,
 strong.cluster_name {
 	font-size: 10pt;
-	letter-spacing: 120%;
+	letter-spacing: +1px;
 }
 
 td.service_name,
@@ -363,38 +372,36 @@
 }
 
 div.systemsList {
-	margin-top: .25em !important;
-	margin-bottom: .25em !important;
+	margin-top: .25em ! important;
+	margin-bottom: .25em ! important;
 }
 
-div.service_comp {
+#service_comp_list {
 	background: #dee7ec;
-	padding: .66em;
-	margin-top: .5em;
-}
-
-div.rc_indent0 {
-	margin-left: 0px;
 	max-width: 700px;
+	padding: 1em;
 }
 
-div.rc_indent1 {
-	margin-left: 30px 
-}
-
-div.rc_indent2 {
-	margin-left: 60px 
+div.rescfg {
+	background: #dee7ec;
 }
 
-div.rc_indent3 {
-	margin-left: 90px 
+div.service_comp {
+	background: #dee7ec;
+	padding: .66em;
+	margin-top: .5em;
+	border-left: thin solid #c9c9c9;
+	border-bottom: thin solid #c9c9c9;
+	margin-bottom: 1em;
 }
 
-div.rc_indent4 {
-	margin-left: 120px 
+div.service_comp > div.service_comp {
+	margin-left: +20px;
+	margin-bottom: 0px;
+	border-bottom: none;
 }
-
-div.rc_indent5 {
-	margin-left: 150px 
+/*
+div.rc_indent0 {
+	border-left: none ! important;
 }
-
+*/
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/30 22:59:01	1.62
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/08 22:54:32	1.63
@@ -150,7 +150,7 @@
 			cluster_properties['isComplete'] = False
 			errors.append(error)
 			return (False, {'errors': errors, 'requestResults':cluster_properties })
-                batch_id_map = {}
+		batch_id_map = {}
 		for i in nodeList:
 			try:
 				rc = RicciCommunicator(i['ricci_host'])
@@ -1786,21 +1786,23 @@
   return resList
                                                                                 
 def getResourceInfo(modelb, request):
-  resMap = {}
-  name = request['resourcename']
-  baseurl = request['URL']
-  cluname = request['clustername']
-  for res in modelb.getResources():
-    if res.getName() == name:
-          resMap['name'] = res.getName()
-	  resMap['type'] = res.resource_type
-	  resMap['tag_name'] = res.TAG_NAME
-	  resMap['attrs'] = res.attr_hash
-	  resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
-          return resMap
-
-  return {}
+	resMap = {}
 
+	try:
+		name = request['resourcename']
+		baseurl = request['URL']
+		cluname = request['clustername']
+
+		for res in modelb.getResources():
+			if res.getName() == name:
+				resMap['name'] = res.getName()
+				resMap['type'] = res.resource_type
+				resMap['tag_name'] = res.TAG_NAME
+				resMap['attrs'] = res.attr_hash
+				resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
+				return resMap
+	except: pass
+	return {}
 
 def delResource(self, request, ragent):
   modelb = request.SESSION.get('model')
@@ -1906,13 +1908,13 @@
   else:
     res = apply(Ip)
   form = request.form
-  addr = form["ip1"]+"."+form["ip2"]+"."+form["ip3"]+"."+form["ip4"]
+  addr = form['ip_address']
   res.attr_hash["address"] = addr
   if form.has_key('monitorLink'):
     res.attr_hash["monitor_link"] = '1'
   else:
     res.attr_hash["monitor_link"] = '0'
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
 
 def addFs(request):
@@ -1943,7 +1945,7 @@
   else:
     res.attr_hash["force_fsck"] = '0'
                                                                                 
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
                                                                                 
 def addGfs(request):
@@ -1964,7 +1966,7 @@
   else:
     res.attr_hash["force_unmount"] = '0'
                                                                                 
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
 
 def addNfsm(request):
@@ -1986,7 +1988,7 @@
   else:
     res.attr_hash["force_unmount"] = '0'
                                                                                 
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
                                                                                 
 def addNfsc(request):
@@ -2000,7 +2002,7 @@
   res.attr_hash["target"] = form["target"]
   res.attr_hash["options"] = form["options"]
                                                                                 
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
                                                                                 
 def addNfsx(request):
@@ -2012,7 +2014,7 @@
   form = request.form
   res.attr_hash["name"] = form["resourceName"]
                                                                                 
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
 
 def addScr(request):
@@ -2025,7 +2027,7 @@
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["file"] = form["file"]
                                                                                 
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
                                                                                 
 def addSmb(request):
@@ -2038,7 +2040,7 @@
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["workgroup"] = form["workgroup"]
                                                                                 
-  modelb.getResourcesPtr().addChild(res);
+  modelb.getResourcesPtr().addChild(res)
   return res
                                                                                 
 def appendModel(request, model):
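
Two of the cluster_adapters.py changes above go together: the IP form now posts a single ip_address field in place of the old ip1..ip4 boxes, and addIp() stores that value straight into the resource's attr_hash. A minimal sketch of that flow, using a stand-in Resource class rather than the real ModelBuilder Ip class:

    class Resource(object):
        def __init__(self):
            self.attr_hash = {}

    def add_ip(form):
        res = Resource()
        res.attr_hash['address'] = form['ip_address']
        res.attr_hash['monitor_link'] = '1' if 'monitorLink' in form else '0'
        return res

    res = add_ip({'ip_address': '10.0.0.10', 'monitorLink': 'on'})
    print(res.attr_hash)    # {'address': '10.0.0.10', 'monitor_link': '1'}
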



^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-07-19 20:20 rmccabe
  0 siblings, 0 replies; 11+ messages in thread
From: rmccabe @ 2006-07-19 20:20 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-19 20:20:53

Modified files:
	luci/cluster   : form-macros index_html 
	luci/homebase  : form-macros index_html 

Log message:
	cluster create, and cluster remove (stop managing) bits

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.4&r2=1.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.3&r2=1.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/form-macros.diff?cvsroot=cluster&r1=1.28&r2=1.29
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/index_html.diff?cvsroot=cluster&r1=1.12&r2=1.13

--- conga/luci/cluster/form-macros	2006/07/14 16:09:39	1.4
+++ conga/luci/cluster/form-macros	2006/07/19 20:20:53	1.5
@@ -68,17 +68,25 @@
   <div metal:define-macro="cluster-form">
    <h2>Cluster Form</h2>
   </div>
-  <div metal:define-macro="clusteradd-form">
+
+
+
+  <div metal:define-macro="clusteradd-form" style="margin-left: 1em">
 	<script type="text/javascript" src="/luci/homebase/homebase_common.js">
 	</script>
 	<script type="text/javascript" src="/luci/homebase/validate_cluster_add.js">
 	</script>
 
+	<tal:block tal:omit-tag=""
+		tal:define="global sessionObj python:request.SESSION.get('checkRet')" />
+
 	<form name="adminform" action="" method="post">
-		<input name="numStorage" id="numStorage" type="hidden" value="3" />
+		<input name="pagetype" id="pagetype" type="hidden" value="6" />
 
 		<h2>Add a Cluster</h2>
 
+		<tal:block tal:condition="python: not sessionObj or not 'requestResults' in sessionObj or not 'nodeList' in sessionObj['requestResults']">
+		<input name="numStorage" type="hidden" value="3" />
 		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
 			<thead class="systemsTable">
 				<tr class="systemsTable"><td class="systemsTable" colspan="2">
@@ -136,6 +144,78 @@
 				</tr>
 			</tbody>
 		</table>
+		</tal:block>
+
+		<tal:block tal:condition="python: sessionObj and 'requestResults' in sessionObj and 'nodeList' in sessionObj['requestResults']">
+
+		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
+			<thead class="systemsTable">
+				<tr class="systemsTable"><td class="systemsTable" colspan="2">
+					<div class="systemsTableTop">
+						<strong>Cluster Name:</strong>
+						<input type="text" id="clusterName" name="clusterName" tal:attributes="value python: sessionObj['requestResults']['clusterName']" />
+					</div>
+				</td></tr>
+				<tr class="systemsTable">
+					<th class="systemsTable">Node Hostname</th>
+					<th class="systemsTable">Root Password</th>
+				</tr>
+			</thead>
+
+			<tfoot class="systemsTable">
+				<tr class="systemsTable"><td colspan="2" class="systemsTable">
+					<div>
+						<input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if cluster node passwords are identical.
+					</div>
+				</td></tr>
+				<tr class="systemsTable"><td class="systemsTable" colspan="2">
+					<div class="systemsTableEnd">
+						<input type="button" class="hbSubmit" value="Add Another Row" onClick="addSystem(adminform);" />
+					</div>
+				</td></tr>
+			</tfoot>
+
+			<span tal:omit-tag=""
+				tal:define="global sysNum python: 0"
+			/>
+
+			<tbody class="systemsTable">
+			<tal:block tal:repeat="node python: sessionObj['requestResults']['nodeList']">
+				<span tal:omit-tag=""
+					tal:define="global nodeAuth python: node['cur_auth']" />
+
+				<tr class="systemsTable">
+					<td class="systemsTable">
+						<input type="text"
+							tal:attributes="
+								id python: '__SYSTEM' + str(sysNum) + ':Addr';
+								name python: '__SYSTEM' + str(sysNum) + ':Addr';
+								value python: node['ricci_host'];
+								class python: 'hbInputSys' + ('errors' in node and ' error' or '')"
+						 />
+					</td>
+					<td class="systemsTable">
+						<input
+							onChange="pwd0Change(adminform);"
+							tal:attributes="
+								type python: nodeAuth and 'text' or 'password';
+								value python: nodeAuth and '[authenticated]' or '';
+								class python: 'hbInputPass' + ('errors' in node and ' error' or '');
+								id python: '__SYSTEM' + str(sysNum) + ':Passwd';
+								name python: '__SYSTEM' + str(sysNum) + ':Passwd';
+						/>
+					</td>
+				</tr>
+				<span tal:omit-tag=""
+					tal:define="global sysNum python: sysNum + 1"
+				/>
+			</tal:block>
+			</tbody>
+		</table>
+
+		<input type="hidden" name="numStorage"
+			tal:attributes="value python: sysNum" />
+		</tal:block>
 
 		<div class="hbSubmit" id="hbSubmit">
 			<input type="button" class="hbSubmit" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
@@ -259,7 +339,7 @@
 	</script>
 
 	<form name="adminform" action="" method="post">
-		<input name="numStorage" id="numStorage" type="hidden" value="1" />
+		<input name="numStorage" id="numStorage" type="hidden" value="0" />
 
 		<h2>Add a Node to a Cluster</h2>
 
--- conga/luci/cluster/index_html	2006/07/05 20:26:00	1.3
+++ conga/luci/cluster/index_html	2006/07/19 20:20:53	1.4
@@ -195,6 +195,33 @@
              <metal:main-form-content use-macro="here/form-chooser/macros/main-form">
                 <h1>Future Site of Forms</h1>
              </metal:main-form-content>
+
+		<span tal:omit-tag=""
+			tal:define="global ret python: request.SESSION.get('checkRet')"
+		/>
+
+		<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
+			<div class="hbclosebox">
+				<a href="javascript:hide_element('retmsgsdiv');"><img src="../homebase/x.png"></a>
+			</div>
+			<ul class="retmsgs">
+				<tal:block repeat="e python:ret['messages']">
+					<li class="retmsgs" tal:content="python:e" />
+				</tal:block>
+			</ul>
+		</div>
+
+		<div id="errmsgsdiv" class="errmsgs" tal:condition="python:(ret and 'errors' in ret and len(ret['errors']))">
+			<div class="hbclosebox">
+				<a class="hbclosebox" href="javascript:hide_element('errmsgsdiv');"><img src="../homebase/x.png"></a>
+			</div>
+			<p class="errmsgs">The following errors occurred:</p>
+			<ul class="errmsgs">
+				<tal:block repeat="e python:ret['errors']">
+					<li class="errmsgs" tal:content="python:e" />
+				</tal:block>
+			</ul>
+		</div>
             </td>
             <tal:comment replace="nothing"> End of main content block </tal:comment>
 
--- conga/luci/homebase/form-macros	2006/07/18 19:25:20	1.28
+++ conga/luci/homebase/form-macros	2006/07/19 20:20:53	1.29
@@ -1,7 +1,7 @@
 <html>
 
 <tal:comment replace="nothing">
-	$Id: form-macros,v 1.28 2006/07/18 19:25:20 rmccabe Exp $
+	$Id: form-macros,v 1.29 2006/07/19 20:20:53 rmccabe Exp $
 </tal:comment>
 
 <head>
@@ -71,8 +71,6 @@
 
 		<input name="absoluteURL" type="hidden"
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
-		<input name="baseURL" type="hidden"
-			tal:attributes="value python:data['children'][data['curIndex']]['base_url']" />
 
 		<div class="hbSubmit" tal:condition="python:userList" id="hbSubmit">
 			<input class="hbSubmit" name="Submit" type="button" value="Submit" onClick="validateForm(document.adminform);" />
@@ -135,8 +133,6 @@
 
 		<input name="absoluteURL" type="hidden"
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
-		<input name="baseURL" type="hidden"
-			tal:attributes="value python:data['children'][data['curIndex']]['base_url']" />
 
 		<div class="hbSubmit" id="hbSubmit">
 			<input class="hbSubmit" name="Submit" type="button" value="Submit" onClick="validateForm(document.adminform);" />
@@ -431,9 +427,6 @@
 
 		<input name="absoluteURL" type="hidden"
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
-		<input name="baseURL" type="hidden"
-			tal:attributes="value python:data['children'][data['curIndex']]['base_url']" />
-
 
 		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
 			<thead class="systemsTable">
@@ -507,9 +500,6 @@
 		<input name="absoluteURL" type="hidden"
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
 
-		<input name="baseURL" type="hidden"
-			tal:attributes="value python:data['children'][data['curIndex']]['base_url']" />
-
 		<h2 class="homebase">Add Cluster</h2>
 
 		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
@@ -541,14 +531,6 @@
 
 			<tbody class="systemsTable">
 			<tal:block tal:repeat="node python: sessionObj['requestResults']['nodeList']">
-			
-				<span tal:omit-tag="" tal-condition="python: 'errors' in node"
-					tal:define="global nodeClassExt python: ' error'" />
-
-				<span tal:omit-tag=""
-					tal-condition="python: not 'errors' in node"
-					tal:define="global nodeClassExt python: ''" />
-
 				<span tal:omit-tag=""
 					tal:define="global nodeAuth python: node['cur_auth']" />	
 
@@ -559,7 +541,7 @@
 								id python: '__SYSTEM' + str(sysNum) + ':Addr';
 								name python: '__SYSTEM' + str(sysNum) + ':Addr';
 								value python: node['ricci_host'];
-								class python: 'hbInputSys' + nodeClassExt;
+								class python: 'hbInputSys' + ('errors' in node and ' error' or '');
 								disabled python: nodeAuth and 1 or 0"
 						 />
 					</td>
@@ -568,7 +550,7 @@
 							tal:attributes="
 								type python: nodeAuth and 'text' or 'password';
 								value python: nodeAuth and '[authenticated]' or '';
-								class python: 'hbInputPass' + nodeClassExt;
+								class python: 'hbInputPass' + ('errors' in node and ' error' or '');
 								id python: '__SYSTEM' + str(sysNum) + ':Passwd';
 								name python: '__SYSTEM' + str(sysNum) + ':Passwd';
 								disabled python: nodeAuth and 1 or 0"
@@ -617,9 +599,6 @@
 
 		<input name="absoluteURL" type="hidden"
 			tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
-		<input name="baseURL" type="hidden"
-			tal:attributes="value python:data['children'][data['curIndex']]['base_url']" />
-
 		<h2 class="homebase">Manage an Existing Cluster</h2>
 
 		<p class="hbText">Enter one node from the cluster you wish to add to the Luci management interface.</p>
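The class-attribute hunks above drop the global nodeClassExt define (whose spans also carried a "tal-condition" typo) in favor of an inline and/or expression. A standalone Python sketch of that idiom, using made-up node data:

    # The and/or idiom used in the 'class' attributes above, shown on its own.
    # Before Python grew a conditional expression, A and B or C evaluated to B
    # when A was true and to C otherwise (safe here because ' error' is truthy).
    node = {'ricci_host': 'node1', 'errors': ['authentication failed']}  # made-up data
    assert 'hbInputSys' + ('errors' in node and ' error' or '') == 'hbInputSys error'

    node = {'ricci_host': 'node2'}
    assert 'hbInputSys' + ('errors' in node and ' error' or '') == 'hbInputSys'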
--- conga/luci/homebase/index_html	2006/07/18 19:25:20	1.12
+++ conga/luci/homebase/index_html	2006/07/19 20:20:53	1.13
@@ -15,7 +15,7 @@
 					xml:lang language">
 
 <tal:comment replace="nothing">
-	$Id: index_html,v 1.12 2006/07/18 19:25:20 rmccabe Exp $
+	$Id: index_html,v 1.13 2006/07/19 20:20:53 rmccabe Exp $
 </tal:comment>
 
 <head metal:use-macro="here/header/macros/html_header">
@@ -64,7 +64,6 @@
 </head>
 
 
-
 <body tal:attributes="class here/getSectionFromURL;
 						dir python:test(isRTL, 'rtl', 'ltr')">
 	<div id="visual-portal-wrapper">



^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Cluster-devel] conga/luci cluster/form-macros cluster/index_h ...
@ 2006-07-05 20:13 rmccabe
  0 siblings, 0 replies; 11+ messages in thread
From: rmccabe @ 2006-07-05 20:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-05 20:13:03

Modified files:
	luci/cluster   : form-macros index_html main_footer 
	luci/homebase  : luci_homebase.css validate_cluster_add.js 
	luci/site/luci/Extensions: homebase_adapters.py 

Log message:
	cluster create and add node to cluster frontend

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/main_footer.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/luci_homebase.css.diff?cvsroot=cluster&r1=1.6&r2=1.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.11&r2=1.12

--- conga/luci/cluster/form-macros	2006/06/30 17:00:02	1.1
+++ conga/luci/cluster/form-macros	2006/07/05 20:13:03	1.2
@@ -66,7 +66,78 @@
    <h2>Cluster Form</h2>
   </div>
   <div metal:define-macro="clusteradd-form">
-   <h2>Cluster Add Form</h2>
+	<script type="text/javascript" src="/luci/homebase/homebase_common.js">
+	</script>
+	<script type="text/javascript" src="/luci/homebase/validate_cluster_add.js">
+	</script>
+
+	<form name="adminform" action="" method="post">
+		<input name="numStorage" id="numStorage" type="hidden" value="3" />
+
+		<h2>Add a Cluster</h2>
+
+		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
+			<thead class="systemsTable">
+				<tr class="systemsTable"><td class="systemsTable" colspan="2">
+					<div class="systemsTableTop">
+						<strong>Cluster Name</strong>
+						<input class="hbInputSys" type="text" id="clusterName" name="clusterName" />
+					</div>
+				</td></tr>
+				<tr class="systemsTable">
+					<th class="systemsTable">System Hostname</th>
+					<th class="systemsTable">Password</th>
+				</tr>
+			</thead>
+
+			<tfoot class="systemsTable">
+				<tr class="systemsTable"><td colspan="2" class="systemsTable">
+					<div>
+						<input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if storage system passwords are identical.
+					</div>
+				</td></tr>
+
+				<tr class="systemsTable"><td class="systemsTable" colspan="2">
+					<div class="systemsTableEnd">
+						<input type="button" class="hbSubmit" value="Add Another Row" onClick="addSystem(adminform);" />
+					</div>
+				</td></tr>
+			</tfoot>
+
+			<tbody class="systemsTable">
+				<tr class="systemsTable">
+					<td class="systemsTable">
+						<input class="hbInputSys" type="text" id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+					</td>
+					<td class="systemsTable">
+						<input type="password" id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd" class="hbInputPass" onChange="pwd0Change(adminform);" />
+					</td>
+				</tr>
+
+				<tr class="systemsTable">
+					<td class="systemsTable">
+						<input class="hbInputSys" type="text" id="__SYSTEM1:Addr" name="__SYSTEM1:Addr" />
+					</td>
+					<td class="systemsTable">
+						<input type="password" id="__SYSTEM1:Passwd" name="__SYSTEM1:Passwd" class="hbInputPass" onChange="pwd0Change(adminform);" />
+					</td>
+				</tr>
+
+				<tr class="systemsTable">
+					<td class="systemsTable">
+						<input class="hbInputSys" type="text" id="__SYSTEM2:Addr" name="__SYSTEM2:Addr" />
+					</td>
+					<td class="systemsTable">
+						<input type="password" id="__SYSTEM2:Passwd" name="__SYSTEM2:Passwd" class="hbInputPass" onChange="pwd0Change(adminform);" />
+					</td>
+				</tr>
+			</tbody>
+		</table>
+
+		<div class="hbSubmit" id="hbSubmit">
+			<input type="button" class="hbSubmit" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+		</div>
+	</form>
   </div>
   <div metal:define-macro="clusterconfig-form">
    <h2>Cluster Configuration Form</h2>
@@ -120,9 +191,67 @@
   <div metal:define-macro="nodeconfig-form">
    <h2>Node Configuration Form</h2>
   </div>
+
   <div metal:define-macro="nodeadd-form">
-   <h2>Node Add Form</h2>
+	<script type="text/javascript" src="/luci/homebase/homebase_common.js">
+	</script>
+
+	<script type="text/javascript" src="/luci/homebase/validate_cluster_add.js">
+	</script>
+
+	<form name="adminform" action="" method="post">
+		<input name="numStorage" id="numStorage" type="hidden" value="1" />
+
+		<h2>Add a Node to a Cluster</h2>
+
+		<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
+			<thead class="systemsTable">
+				<tr class="systemsTable"><td class="systemsTable" colspan="2">
+					<div class="systemsTableTop">
+						<strong>Cluster Name</strong>
+						<select class="hbInputSys" id="clusterName" name="clusterList">
+							<option>Fill this in</option>
+						</select>
+					</div>
+				</td></tr>
+				<tr class="systemsTable">
+					<th class="systemsTable">System Hostname</th>
+					<th class="systemsTable">Password</th>
+				</tr>
+			</thead>
+
+			<tfoot class="systemsTable">
+				<tr class="systemsTable"><td colspan="2" class="systemsTable">
+					<div id="allSameDiv">
+						<input type="checkbox" class="allSameCheckBox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if storage system passwords are identical.
+					</div>
+				</td></tr>
+
+				<tr class="systemsTable"><td class="systemsTable" colspan="2">
+					<div class="systemsTableEnd">
+						<input type="button" class="hbSubmit" value="Add Another Row" onClick="addSystem(adminform);" />
+					</div>
+				</td></tr>
+			</tfoot>
+
+			<tbody class="systemsTable">
+				<tr class="systemsTable">
+					<td class="systemsTable">
+						<input class="hbInputSys" type="text" id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+					</td>
+					<td class="systemsTable">
+						<input type="password" id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd" class="hbInputPass" onChange="pwd0Change(adminform);" />
+					</td>
+				</tr>
+			</tbody>
+		</table>
+
+		<div class="hbSubmit" id="hbSubmit">
+			<input type="button" class="hbSubmit" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+		</div>
+	</form>
   </div>
+
   <div metal:define-macro="nodeprocess-form">
    <h2>Node Process Form</h2>
   </div>
--- conga/luci/cluster/index_html	2006/06/30 17:00:02	1.1
+++ conga/luci/cluster/index_html	2006/07/05 20:13:03	1.2
@@ -54,6 +54,7 @@
       <tal:comment replace="nothing"> A slot where you can insert CSS in the header from a template </tal:comment>
     
   <style type="text/css"><!-- @import url(./clusterportlet.css); --></style>
+  <style type="text/css"><!-- @import url(/luci/homebase/luci_homebase.css); --></style>
       <metal:cssslot define-slot="css_slot" />
     </metal:cssslot>
 
--- conga/luci/cluster/main_footer	2006/06/30 17:00:02	1.1
+++ conga/luci/cluster/main_footer	2006/07/05 20:13:03	1.2
@@ -13,17 +13,11 @@
     2000-<span i18n:name="current_year" 
                tal:define="now modules/DateTime/DateTime" 
                tal:content="now/year" />
-    by 
-    <span>
-        <a href="http://redhat.com/Conga">Red Hat, Luci, Ricci, </a>
-    </span>
-    et al.
+    <a href="http://www.redhat.com/">Red Hat, Inc.</a>
     </span>
 </p>
 
 <p>
-
-
     <span i18n:translate="description_license">
     Distributed under the 
         <span i18n:name="license">
@@ -34,4 +28,4 @@
 
 </div>
 </body>
-</html>
\ No newline at end of file
+</html>
--- conga/luci/homebase/luci_homebase.css	2006/06/22 03:49:54	1.6
+++ conga/luci/homebase/luci_homebase.css	2006/07/05 20:13:03	1.7
@@ -1,4 +1,4 @@
-/* $Id: luci_homebase.css,v 1.6 2006/06/22 03:49:54 rmccabe Exp $ */
+/* $Id: luci_homebase.css,v 1.7 2006/07/05 20:13:03 rmccabe Exp $ */
 
 *.errmsgs,*.retmsgs {
 	list-style-image: none !important;
@@ -139,6 +139,3 @@
   margin-top: .25em !important;
   margin-bottom: .25em !important;
 }
-
-*.hbText {
-}
--- conga/luci/homebase/validate_cluster_add.js	2006/06/30 18:06:10	1.1
+++ conga/luci/homebase/validate_cluster_add.js	2006/07/05 20:13:03	1.2
@@ -1,9 +1,19 @@
 function validateForm(form) {
 	var errors = new Array();
 
-	if (!form || !form.clusterName ||
-		str_is_blank(form.clusterName.value))
-	{
+	if (!form)
+		return (-1);
+
+	if (form.clusterList) {
+		i = form.clusterList.selectedIndex;
+		if (i < 0 || !form.clusterList[i])
+			errors.push('You have not selected a valid cluster.');
+		else
+			clusterName = form.clusterList[i].value;
+	} else if (form.clusterName)
+		clusterName = form.clusterName.value;
+
+	if (str_is_blank(clusterName)) {
 		errors.push('No cluster name was given.');
 	} else {
 		var invalid_chars = str_is_valid(form.clusterName.value, '/[0-9A-Za-z_. -]/g');
@@ -16,6 +26,12 @@
 	if (error_dialog(errors))
 		return (-1);
 
+	if (!added_storage || added_storage.length < 1)
+		errors.push('You have not added any cluster nodes.');
+
+	if (error_dialog(errors))
+		return (-1);
+
 	if (confirm("Submit form?"))
 		form.submit();
 
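The hunk above lets one validateForm() serve both pages: the add-node form exposes a clusterList select, the add-cluster form a free-text clusterName field, and only after picking a name is the blank-name check applied. Restated in Python purely for illustration (the real logic lives in validate_cluster_add.js; the field names mirror the markup):

    # Illustration only; not part of the patch.
    def pick_cluster_name(form):
        errors = []
        cluster_name = ''
        if 'clusterList' in form:            # add-node form: <select>
            cluster_name = form['clusterList'] or ''
            if not cluster_name:
                errors.append('You have not selected a valid cluster.')
        elif 'clusterName' in form:          # add-cluster form: free text
            cluster_name = form['clusterName'] or ''
        if not cluster_name.strip():
            errors.append('No cluster name was given.')
        return cluster_name, errors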
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/06/30 23:07:32	1.11
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/07/05 20:13:03	1.12
@@ -530,7 +530,8 @@
 		for i in params:
 			cur['absolute_url'] += '&' + cgi.escape(i) + '=' + cgi.escape(params[i])
 	else:
-		cur['base_url'] = cur['absolute_url']
+		if cur and 'absolute_url' in cur:
+			cur['base_url'] = cur['absolute_url']
 
 	ret['children'] = temp
 	return ret
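The homebase_adapters.py change above stops assuming the current entry exists and already carries 'absolute_url' before deriving 'base_url' from it. The same defensive pattern, reduced to a standalone sketch (the function name and sample values are made up):

    # Standalone sketch of the guard added above; fill_base_url() is hypothetical.
    def fill_base_url(cur):
        if cur and 'absolute_url' in cur:
            cur['base_url'] = cur['absolute_url']
        return cur

    fill_base_url({'absolute_url': '/luci/homebase?pagetype=0'})  # base_url copied
    fill_base_url({})                                             # left untouched
    fill_base_url(None)                                           # no AttributeError raised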



^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2007-07-26  4:16 UTC | newest]

Thread overview: 11+ messages
2007-02-20 23:07 [Cluster-devel] conga/luci cluster/form-macros cluster/index_h rmccabe
  -- strict thread matches above, loose matches on Subject: below --
2007-07-26  4:16 rmccabe
2007-02-20 23:09 rmccabe
2006-12-21  5:08 rmccabe
2006-11-07 21:33 rmccabe
2006-11-03 19:13 rmccabe
2006-10-31 17:28 rmccabe
2006-10-16  4:26 rmccabe
2006-09-08 22:54 rmccabe
2006-07-19 20:20 rmccabe
2006-07-05 20:13 rmccabe
