* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-02-13 19:50 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-02-13 19:50 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-02-13 19:50:39

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix for service info display when a gulm cluster is unresponsive.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.6&r2=1.227.2.7

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/12 20:25:42	1.227.2.6
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/13 19:50:39	1.227.2.7
@@ -4569,7 +4569,9 @@
   infohash['currentservices'] = svc_dict_list
 
   fdom_dict_list = list()
+  gulm_cluster = False
   if model:
+    gulm_cluster = model.getGULMPtr() is not None
     try:
       infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
     except:
@@ -4607,7 +4609,7 @@
     if rc is not None:
       dlist = list()
       dlist.append("ccsd")
-      if model.getGULMPtr() is None:
+      if not gulm_cluster:
         dlist.append("cman")
         dlist.append("fenced")
       else:
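
The fix hoists the GULM check to a point where the model object is known
to be valid: the flag is computed once under "if model:", and the later
daemon-list code tests the plain boolean instead of dereferencing the
model a second time, which would fail when a gulm cluster is unresponsive
and no model is available. A minimal sketch of the pattern, with
hypothetical names (the else branch is truncated in the hunk above, so
the GULM daemon name below is an assumption):

    def build_daemon_list(model, rc):
        gulm_cluster = False
        if model:
            # Safe: model is known to be non-None in this block.
            gulm_cluster = model.getGULMPtr() is not None

        dlist = list()
        if rc is not None:
            dlist.append('ccsd')
            if not gulm_cluster:
                # CMAN-based cluster daemons
                dlist.append('cman')
                dlist.append('fenced')
            else:
                # Assumed: GULM's lock daemon (not shown in the hunk above)
                dlist.append('lock_gulmd')
        return dlist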




* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2008-07-17 16:36 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2008-07-17 16:36 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2008-07-17 16:36:56

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix use of an undeclared variable

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.45&r2=1.120.2.46

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2008/07/14 16:29:28	1.120.2.45
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2008/07/17 16:36:56	1.120.2.46
@@ -1169,7 +1169,6 @@
 				msg_list.append('Fix the error and try again:\n')
 			else:
 				msg_list.append('PASSED\n')
-				model.setModified(True)
 				msg_list.append('DONE\n')
 				msg_list.append('Propagating the new cluster.conf')
 




* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2008-04-18 20:37 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2008-04-18 20:37 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2008-04-18 20:37:46

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Pass in the right object.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.26&r2=1.227.2.27

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2008/03/25 01:27:12	1.227.2.26
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2008/04/18 20:37:45	1.227.2.27
@@ -1017,7 +1017,7 @@
 			luci_log.debug_verbose('validateFdom0: no model')
 		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
-	ret = validate_fdom(self, request)
+	ret = validate_fdom(model, request)
 	if ret[0] is not True:
 		return ret
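
The one-word fix matters because validate_fdom on this branch takes the
parsed cluster model as its first argument; Python does no call-site type
checking, so passing the Zope view object ("self") instead only surfaces
as an error when the helper first invokes a model method. A hypothetical
sketch of the mismatch:

    class ClusterModel(object):
        def getFailoverDomainByName(self, name):
            return None  # lookup elided

    def validate_fdom(model, request):
        # AttributeError here if 'model' is actually the view object
        return model.getFailoverDomainByName(request.get('name'))

    validate_fdom(ClusterModel(), {'name': 'fdom1'})  # works as intended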
 




* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-12-12 15:45 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-12-12 15:45 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-12-12 15:45:27

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Use new form validation routines

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.275&r2=1.276
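
The diff below moves the bodies of the form-validation routines out of
cluster_adapters.py into a separate LuciValidation module, leaving each
adapter as a thin wrapper with the same shape: extract the cluster name
and URL from the request, load the cluster model, delegate to the
validator, then propagate the new cluster.conf asynchronously and
redirect. It also replaces the per-node RicciCommunicator loop in
cluster creation with a single send_batch_to_hosts call. A simplified
sketch of the wrapper shape the refactor converges on (validate_example
and EXAMPLE_ACTION are hypothetical; error strings abbreviated):

    def validateExample(self, request):
        from LuciValidation import validate_example

        fvar = GetReqVars(request, [ 'clustername', 'URL' ])
        baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL

        clustername = fvar['clustername']
        if clustername is None:
            return (False, { 'errors': [ 'No cluster name was given' ]})

        model = LuciExtractCluModel(self, request, clustername)
        if model is None:
            return (False, { 'errors': [ 'Unable to retrieve the cluster configuration' ]})

        # Pure validation against the model; no Zope state needed.
        ret = validate_example(model, request)
        if ret[0] is not True:
            return ret

        ret = propagateClusterConfAsync(self, model, None,
                EXAMPLE_ACTION, 'Updating example configuration')
        if ret[0] is not True:
            return ret

        request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
                % (baseurl, EXAMPLE_ACTION, clustername))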

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/11/06 23:05:07	1.275
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/12/12 15:45:27	1.276
@@ -7,44 +7,34 @@
 
 from xml.dom import minidom
 
-from ClusterModel.FailoverDomain import FailoverDomain
-from ClusterModel.FailoverDomainNode import FailoverDomainNode
-from ClusterModel.RefObject import RefObject
 from ClusterModel.ClusterNode import ClusterNode
-from ClusterModel.Service import Service
-from ClusterModel.Lockserver import Lockserver
-from ClusterModel.Vm import Vm
-from ClusterModel.FenceXVMd import FenceXVMd
-from ClusterModel.QuorumD import QuorumD
-from ClusterModel.Heuristic import Heuristic
-from ClusterModel.Fence import Fence
-from ClusterModel.Method import Method
 
 import RicciQueries as rq
 from HelperFunctions import resolveOSType, send_batch_to_hosts
 from LuciSyslog import get_logger
-from ResourceHandler import create_resource
 from homebase_adapters import parseHostForm
 from LuciClusterActions import propagateClusterConfAsync
 
+from LuciZopeAsync import validate_clusvc_async
+
 from LuciClusterInfo import getClusterInfo, \
 	getModelBuilder, LuciExtractCluModel
 
 from conga_constants import BATCH_ID, CLUNODE_CREATE_ERRORS, \
 	CLUSTER_ADD, CLUSTER_CONFIG, CLUSTER_DAEMON, CLUSTER_DELETE, \
 	CLUSTER_FOLDER_PATH, CLUSTER_RESTART, CLUSTER_START, CLUSTER_STOP, \
-	DISABLE_SVC_TASK, ENABLE_SVC_TASK, FDOM, FDOM_ADD, FENCEDEV, \
+	DISABLE_SVC_TASK, ENABLE_SVC_TASK, FDOM, FENCEDEV, \
 	FENCEDEV_NODE_CONFIG, FENCEDEVS, FLAG_DESC, INSTALL_TASK, CLUSTER_PROCESS, \
 	LAST_STATUS, LUCI_DEBUG_MODE, NODE, NODE_ADD, NODE_DELETE, \
 	NODE_FENCE, NODE_FORCE_DELETE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, \
 	NODE_REBOOT, NODES, POSSIBLE_REBOOT_MESSAGE, PRE_CFG, PRE_INSTALL, \
 	PRE_JOIN, REBOOT_TASK, REDIRECT_MSG, RESOURCES, RICCI_CONNECT_FAILURE, \
-	RICCI_CONNECT_FAILURE_MSG, SEND_CONF, SERVICE_ADD, SERVICE_CONFIG, \
-	SERVICE_LIST, SERVICES, START_NODE, TASKTYPE, VM_ADD, VM_CONFIG, \
+	RICCI_CONNECT_FAILURE_MSG, SEND_CONF, \
+	SERVICE_LIST, SERVICES, START_NODE, TASKTYPE, \
 	REDIRECT_SEC, LUCI_CLUSTER_BASE_URL, FENCE_XVM_KEY_CREATE
 
 from FenceHandler import validateNewFenceDevice, \
-	validateFenceDevice, validate_fenceinstance, FD_VAL_SUCCESS
+	validateFenceDevice, FD_VAL_SUCCESS
 
 from ricci_communicator import RicciCommunicator, RicciError, \
 	batch_status, extract_module_status
@@ -274,23 +264,6 @@
 		request.SESSION.set('create_cluster', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
 
-	node_list = add_cluster['nodes'].keys()
-	batchNode = rq.createClusterBatch(add_cluster['cluster_os'],
-					clustername,
-					clustername,
-					node_list,
-					True,
-					True,
-					add_cluster['shared_storage'],
-					False,
-					add_cluster['download_pkgs'],
-					lockservers)
-
-	if not batchNode:
-		request.SESSION.set('create_cluster', add_cluster)
-		errors.append('Unable to generate cluster creation ricci command')
-		return (False, { 'errors': errors, 'messages': messages })
-
 	error = manageCluster(self, clustername,
 				add_cluster['nodes'], add_cluster['cluster_os'])
 	if error:
@@ -298,37 +271,30 @@
 		request.SESSION.set('create_cluster', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
 
+	node_list = add_cluster['nodes'].keys()
+
+	ret = send_batch_to_hosts(node_list, 10, rq.create_cluster, 
+			add_cluster['cluster_os'], clustername, clustername,
+			node_list, True, True, add_cluster['shared_storage'], False,
+			add_cluster['download_pkgs'], lockservers)
+
 	batch_id_map = {}
-	for i in node_list:
-		try:
-			rc = RicciCommunicator(i)
-			if not rc:
-				raise Exception, 'rc is None'
-		except Exception, e:
-			msg = 'Unable to connect to the ricci agent on %s: %s' % (i, str(e))
+	for i in ret.iterkeys():
+		if ret[i].has_key('error'):
+			msg = 'Unable to connect to the ricci agent on %s: %s' \
+						% (i, ret[i]['err_msg'])
 			errors.append(msg)
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose(msg)
-
-			if len(batch_id_map) == 0:
-				request.SESSION.set('create_cluster', add_cluster)
-				return (False, { 'errors': errors, 'messages': messages })
 			continue
+		batch_id_map[i] = ret[i]['batch_result']
 
-		try:
-			resultNode = rc.process_batch(batchNode, async=True)
-			batch_id_map[i] = resultNode.getAttribute('batch_id')
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('validateCreateCluster0: %s: %r %s' \
-					% (i, e, str(e)))
-			errors.append('An error occurred while attempting to add cluster node "%s"' % i)
-			if len(batch_id_map) == 0:
-				request.SESSION.set('create_cluster', add_cluster)
-				return (False, { 'errors': errors, 'messages': messages })
-			continue
+	if len(batch_id_map) == 0:
+		request.SESSION.set('create_cluster', add_cluster)
+		return (False, { 'errors': errors, 'messages': messages })
 
 	buildClusterCreateFlags(self, batch_id_map, clustername)
+
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 		% (request['URL'], CLUSTER_CONFIG, clustername))
@@ -712,18 +678,16 @@
 		% (request['URL'], CLUSTER_CONFIG, clustername))
 
 def validateServiceAdd(self, request):
-	errors = list()
-	fvar = GetReqVars(request, [ 'form_xml', 'clustername', 'domain', 'recovery', 'svc_name', 'action', 'URL' ])
+	from LuciValidation import validate_clusvc_add
 
+	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
 	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-	clustername = fvar['clustername']
 
-	form_xml = fvar['form_xml']
-	if form_xml is None:
-		form_xml = ''
+	clustername = fvar['clustername']
+	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA0: no form_xml')
-
+			luci_log.debug_verbose('VFE0: No cluster name')
+		return (False, {'errors': ['No cluster name was given']})
 
 	model = LuciExtractCluModel(self, request, clustername)
 	if model is None:
@@ -731,1060 +695,222 @@
 			luci_log.debug_verbose('vSA1: no model')
 		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
-	forms = []
-	if form_xml.strip():
-		try:
-			doc = minidom.parseString(form_xml)
-			forms = doc.getElementsByTagName('form')
-			if len(forms) < 1:
-				raise Exception, 'invalid XML'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vSA1: error: %r %s: %r' % (e, str(e), form_xml))
-			return (False, { 'errors': [ 'The resource data submitted for this service is not properly formed' ]})
-
-	form_hash = {}
-	form_hash['toplevel'] = { 'form': None, 'kids': [] }
-	for i in forms:
-		form_id = i.getAttribute('id')
-		form_parent = i.getAttribute('parent')
-		if not form_id or not form_parent:
-			continue
-		ielems = i.getElementsByTagName('input')
-		if not ielems or len(ielems) < 1:
-			continue
-		if not form_id in form_hash:
-			form_hash[form_id] = {'form': i, 'kids': []}
-		elif not form_hash[form_id]['form']:
-			form_hash[form_id]['form'] = i
-		if not form_parent in form_hash:
-			form_hash[form_parent] = {'form': None, 'kids': []}
-		form_hash[form_parent]['kids'].append(form_id)
-		dummy_form = {}
-
-		for i in ielems:
-			try:
-				input_type = str(i.getAttribute('type'))
-			except:
-				continue
-			if not input_type or input_type == 'button':
-				continue
-			try:
-				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vSA2: parsing XML: %r %s' \
-						% (e, str(e)))
-
-		try:
-			res_type = dummy_form['type'].strip()
-			if not res_type:
-				raise Exception, 'no resource type'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vSA3: %r %s' % (e, str(e)))
-			return (False, { 'errors': [ 'No resource type was specified' ]})
-
-		try:
-			if res_type == 'ip':
-				dummy_form['resourceName'] = dummy_form['ip_address']
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vSA3a: type is ip but no addr: %r %s' \
-					% (e, str(e)))
-			return (False, { 'errors': [ 'No IP address was given' ]})
-
-		try:
-			if dummy_form.has_key('immutable'):
-				newRes = model.getResourceByName(dummy_form['resourceName'])
-				resObj = RefObject(newRes)
-				resObj.setRef(newRes.getName())
-			else:
-				resObj = create_resource(res_type, dummy_form, model)
-		except Exception, e:
-			resObj = None
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vSA4: type %s: %r %s' \
-					% (res_type, e, str(e)))
-
-		if resObj is None:
-			return (False, { 'errors': [ 'An error occurred while adding %s' % res_type ]})
-
-		if dummy_form.has_key('__independent_subtree'):
-			resObj.addAttribute('__independent_subtree', '1')
-		else:
-			resObj.removeAttribute('__independent_subtree')
-		form_hash[form_id]['obj'] = resObj
+	ret = validate_clusvc_add(model, request)
+	if ret[0] is not True:
+		return ret
 
-	if len(errors) > 0:
-		return (False, {'errors': errors})
+	action_type = ret[1]['action_type']
+	action_msg = ret[1]['action_msg']
+	ret = propagateClusterConfAsync(self, model, rc=None,
+			action=action_type, pmsg=action_msg)
+	if ret[0] is not True:
+		return ret
 
-	fdom = fvar['domain']
+	response = request.RESPONSE
+	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+		% (baseurl, SERVICES, clustername))
 
-	recovery = fvar['recovery']
-	if recovery is not None and recovery != 'restart' and recovery != 'relocate' and recovery != 'disable':
-		errors.append('You entered an invalid recovery option: "%s" Valid options are "restart" "relocate" and "disable."')
+def validateResourceAdd(self, request):
+	from LuciValidation import validate_clures_add
 
-	service_name = fvar['svc_name']
-	if service_name is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5: no service name')
-		errors.append('No service name was given')
+	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
+	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
-	autostart = '1'
-	try:
-		if not request.form.has_key('autostart') or request.form['autostart'] == '0':
-			autostart = '0'
-	except Exception, e:
-		autostart = None
+	clustername = fvar['clustername']
+	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5a: error getting autostart: %r %s' \
-				% (e, str(e)))
-
-	exclusive = '0'
-	try:
-		if not request.form.has_key('exclusive') or request.form['exclusive'] != '1':
-			exclusive = '0'
-		else:
-			exclusive = '1'
-	except Exception, e:
-		exclusive = '0'
+			luci_log.debug_verbose('VFE0: No cluster name')
+		return (False, {'errors': ['No cluster name was given']})
 
-	try:
-		cur_service = model.retrieveServiceByName(service_name)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5c: no service named %s found: %r %s' \
-				% (service_name, e, str(e)))
-		cur_service = None
-
-	action = fvar['action']
-	if action is None:
-		return (False, {'errors': [ 'No action was given for service %s' % service_name ] })
-
-	if action == 'edit':
-		if cur_service is None:
-			return (False, {'errors': [ 'The service %s could not be found for editing' % service_name ]})
-		model.deleteService(service_name)
-	elif action == 'add':
-		if cur_service is not None:
-			return (False, {'errors': [ 'A service with the name %s already exists' % service_name ]})
-	else:
+	model = LuciExtractCluModel(self, request, clustername)
+	if model is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA4a: unknown action %s' \
-				% request.form['action'])
-		return (False, {'errors': [ 'An unknown action was specified' ]})
+			luci_log.debug_verbose('vRA1: no model')
+		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
-	def buildSvcTree(parent, child_id_list):
-		for i in child_id_list:
-			try:
-				child = form_hash[i]['obj']
-				if not child:
-					raise Exception, 'No object for %s' % i
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('bST0: %r %s' % (e, str(e)))
-				continue
-			parent.addChild(child)
-			if 'kids' in form_hash[i]:
-				buildSvcTree(child, form_hash[i]['kids'])
-
-	new_service = Service()
-	new_service.addAttribute('name', service_name)
-	if fdom:
-		new_service.addAttribute('domain', fdom)
-	if recovery:
-		new_service.addAttribute('recovery', recovery)
-	new_service.addAttribute('exclusive', str(exclusive))
-	if autostart is not None:
-		new_service.attr_hash['autostart'] = autostart
-
-	buildSvcTree(new_service, form_hash['toplevel']['kids'])
-	model.resourcemanager_ptr.addChild(new_service)
-	model.setModified(True)
-
-	if action == 'edit':
-		action_type = SERVICE_CONFIG
-		action_msg = 'Configuring service "%s"'
-	else:
-		action_type = SERVICE_ADD
-		action_msg = 'Creating service "%s"'
+	ret = validate_clures_add(model, request)
+	if ret[0] is not True:
+		return ret
 
+	resname = ret[1]['res_name']
 	ret = propagateClusterConfAsync(self, model, rc=None,
-			action=action_type, pmsg=action_msg % service_name)
+			action=RESOURCES, pmsg='Configuring cluster resource %s' % resname)
 	if ret[0] is not True:
 		return ret
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (baseurl, SERVICES, model.getClusterName()))
+			% (baseurl, RESOURCES, clustername))
 
-def validateResourceAdd(self, request):
-	try:
-		res_type = request.form['type'].strip()
-		if not res_type:
-			raise KeyError, 'type is blank'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VRA0: type is blank')
-		return (False, {'errors': ['No resource type was given']})
+	return (True, { 'messages': [ 'Resource "%s" configured successfully' % resname]})
 
-	model = LuciExtractCluModel(self, request)
-	if model is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VRA1: no model')
-		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration. The configuration XML may contain errors' ]})
+def validateConfigCluster(self, request):
+	from LuciValidation import validate_config_mcast, validate_config_qdisk, \
+		validate_config_fence, validate_config_gulm, validate_config_general
+
+	configFormValidators = {
+		'general': validate_config_general,
+		'mcast': validate_config_mcast,
+		'fence': validate_config_fence,
+		'qdisk': validate_config_qdisk,
+		'gulm': validate_config_gulm
+	}
 
 	errors = list()
-	try:
-		res = create_resource(res_type, request.form, model)
-	except Exception, e:
-		errors.extend(e)
+	messages = list()
+	fvar = GetReqVars(request, [ 'configtype', 'clustername', 'URL' ])
+	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
-	if len(errors) < 1:
-		try:
-			resourceAdd(self, request, model, res)
-		except Exception, e:
-			errors.append('An error occurred while adding resource "%s"' \
-				% res.getName())
-	if len(errors) > 0:
-		errors.append('An error occurred while adding this resource')
+	clustername = fvar['clustername']
+	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('resource error: %r %s' % (e, str(e)))
-		return (False, { 'errors': errors})
-
-	return (True, { 'messages': [ 'Resource added successfully' ]})
+			luci_log.debug_verbose('VFE0: No cluster name')
+		return (False, {'errors': ['No cluster name was given']})
 
-## Cluster properties form validation routines
+	model = LuciExtractCluModel(self, request, clustername)
+	if model is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC0: no model')
+		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
-# rhel5 cluster version
-def validateMCastConfig(model, form):
-	try:
-		gulm_ptr = model.getGULMPtr()
-		if gulm_ptr:
-			return (False, {'errors': ['Multicast cannot be used with GULM locking']})
-	except:
-		pass
+	if clustername is None:
+		clustername = model.getClusterName()
 
-	errors = list()
-	try:
-		mcast_val = form['mcast'].strip().lower()
-		if mcast_val != 'true' and mcast_val != 'false':
-			raise KeyError, mcast_val
-		if mcast_val == 'true':
-			mcast_manual = True
-		else:
-			mcast_manual = False
-	except KeyError, e:
-		errors.append('An invalid multicast selection was made')
-		return (False, {'errors': errors})
+	config_type = fvar['configtype']
+	if config_type is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC1: no config type')
+		return (False, {'errors': [ 'No configuration type was given' ]})
 
-	mcast_interface = None
-	if form.has_key('mcast_interface'):
-		mcast_interface = form['mcast_interface'].strip()
+	if not configFormValidators.has_key(config_type):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC2: invalid config type: %s' \
+				% config_type)
+		return (False, { 'errors': [ 'An invalid configuration type "%s" was submitted' % config_type ]})
 
-	if mcast_manual is True and form.has_key('cluster_version') and form['cluster_version'].strip() == 'rhel4' and not mcast_interface:
-		errors.append('No multicast interface was specified')
-		return (False, {'errors': errors})
+	config_validator = configFormValidators[config_type]
+	ret = config_validator(model, request.form)
 
-	if mcast_manual is True:
-		import socket
-		try:
-			addr_str = form['mcast_address'].strip()
-			socket.inet_pton(socket.AF_INET, addr_str)
-		except KeyError, e:
-			addr_str = None
-			errors.append('No multicast address was given')
-		except socket.error, e:
-			try:
-				socket.inet_pton(socket.AF_INET6, addr_str)
-			except socket.error, e:
-				addr_str = None
-				errors.append('An invalid multicast address was given: %s')
-	else:
-		addr_str = None
+	retcode = ret[0]
+	if ret[1].has_key('errors'):
+		errors.extend(ret[1]['errors'])
+	if ret[1].has_key('messages'):
+		messages.extend(ret[1]['messages'])
 
-	try:
-		if not addr_str:
-			if mcast_interface:
-				errors.append('A multicast interface was specified, but no multicast address was given')
-				return (False, {'errors': errors})
-			model.del_cluster_multicast()
-		else:
-			model.set_cluster_multicast(addr_str, mcast_if=mcast_interface)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('Error updating mcast properties: %r %s' \
-				% (e, str(e)))
-		errors.append('Unable to update cluster multicast properties')
+	if retcode is not True or len(errors) > 0:
+		return (False, {'errors': errors, 'messages': messages})
 
-	if len(errors) > 0:
-		return (False, {'errors': errors})
+	ret = propagateClusterConfAsync(self, model, None,
+			CLUSTER_CONFIG, 'Updating cluster configuration')
+	if ret[0] is not True:
+		if ret[1].has_key('errors'):
+			errors.extend(ret[1]['errors'])
+		return (retcode, {'errors': errors, 'messages': messages})
 
-	return (True, {})
+	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+		% (baseurl, CLUSTER_CONFIG, clustername))
 
-def validateQDiskConfig(model, form):
+def validateFenceAdd(self, request):
 	errors = list()
+	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
 
-	try:
-		qdisk_val = form['quorumd'].strip().lower()
-		if qdisk_val != 'true' and qdisk_val != 'false':
-			raise KeyError(qdisk_val)
-		if qdisk_val == 'true':
-			qdisk_val = 1
-		else:
-			qdisk_val = 0
-	except KeyError, e:
-		return (False, {'errors': ['An invalid quorum partition selection was made']})
-
-	cp = model.getClusterPtr()
-	qdp = model.getQuorumdPtr()
+	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
-	if not qdisk_val:
-		if qdp:
-			try:
-				cp.removeChild(qdp)
-			except Exception, e:
-				return (False, {'errors': [ 'Error disabling quorum partition: %s' % str(e) ] })
-		return (True, {})
+	clustername = fvar['clustername']
+	if clustername is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE0: No cluster name')
+		return (False, {'errors': ['No cluster name was given']})
 
-	try:
-		interval = int(form['interval'])
-		if interval < 0:
-			raise ValueError, 'Interval must be 0 or greater'
-	except KeyError, e:
-		errors.append('No Interval value was given')
-	except ValueError, e:
-		errors.append('An invalid Interval value was given: %s' % str(e))
-
-	try:
-		votes = int(form['votes'])
-		if votes < 1:
-			raise ValueError, 'Votes must be greater than 0'
-	except KeyError, e:
-		errors.append('No Votes value was given')
-	except ValueError, e:
-		errors.append('An invalid Votes value was given: %s' % str(e))
-
-	try:
-		tko = int(form['tko'])
-		if tko < 0:
-			raise ValueError, 'TKO must be 0 or greater'
-	except KeyError, e:
-		errors.append('No TKO value was given')
-	except ValueError, e:
-		errors.append('An invalid TKO value was given: %s' % str(e))
-
-	try:
-		min_score = int(form['min_score'])
-		if min_score < 1:
-			raise ValueError('Minimum Score must be greater than 0')
-	except KeyError, e:
-		errors.append('No Minimum Score value was given')
-	except ValueError, e:
-		errors.append('An invalid Minimum Score value was given: %s' % str(e))
+	model = LuciExtractCluModel(self, request, clustername)
+	if model is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFA0: no model')
+		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
 
-	#Either device or label must be present
-	device = None
-	try:
-		device = form['device'].strip()
-	except:
-		device = None
+	ret_code, ret_obj = validateNewFenceDevice(request.form, model)
+	if ret_code != FD_VAL_SUCCESS:
+		errors.extend(ret_obj)
+		return (False, { 'errors': errors })
 
-	label = None
-	try:
-		label = form['label'].strip()
-	except:
-		label = None
+	ret = propagateClusterConfAsync(self, model, None,
+			CLUSTER_CONFIG, 'Creating fence device "%s"' % ret_obj)
+	if ret[0] is not True:
+		return ret
 
-	if not device and not label:
-		errors.append('No Device or Label value was given')
+	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, ret_obj))
 
-	num_heuristics = 0
-	try:
-		num_heuristics = int(form['num_heuristics']) + 1
-		if num_heuristics < 1:
-			raise ValueError, form['num_heuristics']
-	except KeyError, e:
-		errors.append('No number of heuristics was given')
-	except ValueError, e:
-		errors.append('An invalid number of heuristics was given: %s' % str(e))
+def validateFenceEdit(self, request):
+	errors = list()
 
-	heuristics = list()
-	for i in xrange(num_heuristics):
-		try:
-			h = form['heuristic%d' % i]
-			if not h or len(h) != 3 or not (h[0].strip() and h[1].strip() and h[2].strip()):
-				continue
-		except:
-			continue
+	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
+	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
-		try:
-			hprog = h[0]
-			if not hprog:
-				raise Exception, 'no hprog'
-		except Exception, e:
-			errors.append('No program was given for heuristic %d' % (i + 1))
-		try:
-			hint = int(h[1])
-			if hint < 1:
-				raise ValueError, 'Heuristic interval values must be greater than 0'
-		except KeyError, e:
-			errors.append('No interval was given for heuristic %d' % (i + 1))
-		except ValueError, e:
-			errors.append('An invalid interval was given for heuristic %d: %s' \
-				% ((i + 1), str(e)))
+	clustername = fvar['clustername']
+	if clustername is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE0: No cluster name')
+		return (False, {'errors': ['No cluster name was given']})
 
-		try:
-			hscore = int(h[2])
-			if hscore < 1:
-				raise ValueError, 'Heuristic scores must be greater than 0'
-		except KeyError, e:
-			errors.append('No score was given for heuristic %d' % (i + 1))
-		except ValueError, e:
-			errors.append('An invalid score was given for heuristic %d: %s' \
-				% ((i + 1), str(e)))
+	model = LuciExtractCluModel(self, request, clustername)
+	if model is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE1: no model')
+		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
 
-		heuristics.append([ hprog, hint, hscore ])
+	# This is a fence edit situation, so the model should already have an
+	# entry for this fence device.
+	#
+	# pass form and model to validation method, then save changes if it passes.
+	error_code, retobj = validateFenceDevice(request.form, model)
+	if error_code != FD_VAL_SUCCESS:
+		errors.extend(retobj)
+		return (False, { 'errors': errors })
 
-	if len(errors) > 0:
-		return (False, {'errors': errors })
+	ret = propagateClusterConfAsync(self, model, None,
+			CLUSTER_CONFIG, 'Updating fence device "%s"' % retobj)
+	if ret[0] is not True:
+		return ret
 
-	qd = QuorumD()
-	qd.addAttribute('interval', str(interval))
-	qd.addAttribute('votes', str(votes))
-	qd.addAttribute('tko', str(tko))
-	qd.addAttribute('min_score', str(min_score))
+	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, retobj))
 
-	if device:
-		qd.addAttribute('device', str(device))
-	else:
-		qd.addAttribute('label', str(label))
+def validateNodeFenceConfig(self, request):
+	from LuciValidation import validate_node_fence_config
 
-	if qdp:
-		try:
-			cp.removeChild(qdp)
-		except:
-			pass
-	cp.addChild(qd)
+	fvar = GetReqVars(request, [ 'nodename', 'clustername', 'URL' ])
+	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
+	nodename = fvar['nodename']
 
-	for h in heuristics:
-		new_h = Heuristic()
-		new_h.addAttribute('program', str(h[0]))
-		new_h.addAttribute('interval', str(h[1]))
-		new_h.addAttribute('score', str(h[2]))
-		qd.addChild(new_h)
-
-	if len(errors) > 0:
-		return (False, {'errors': errors })
-
-	return (True, {})
-
-def validateGeneralConfig(model, form):
-	errors = list()
-
-	try:
-		cp = model.getClusterPtr()
-		old_name = model.getClusterAlias()
-		old_ver = int(cp.getConfigVersion())
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getConfigVersion: %s' % str(e))
-		errors.append('unable to determine the current configuration version')
-		return (False, {'errors': errors})
-
-	try:
-		cluster_name = form['cluname'].strip()
-		if not cluster_name:
-			raise KeyError('cluname')
-	except KeyError, e:
-		errors.append('No cluster name was given')
-
-	if len(cluster_name) > 15:
-		errors.append('A cluster\'s name must be less than 16 characters long')
-
-	try:
-		version_num = int(form['cfgver'])
-		if version_num < old_ver:
-			raise ValueError, 'configuration version number must be %d or greater' % old_ver
-	except KeyError, e:
-		errors.append('No cluster configuration version was given')
-	except ValueError, e:
-		errors.append('An invalid configuration version was given: %s' % str(e))
-
-	if len(errors) < 1:
-		try:
-			if cluster_name != old_name:
-				cp.addAttribute('alias', cluster_name)
-			cp.setConfigVersion(str(version_num))
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('unable to update general properties: %r %s' % (e, str(e)))
-			errors.append('Unable to update the cluster configuration')
-
-	try:
-		cluster_version = form['cluster_version'].strip()
-		if cluster_version != 'rhel5':
-			raise Exception, 'not rhel5'
-	except:
-		if len(errors) > 0:
-			return (False, {'errors': errors})
-		return (True, {})
-
-	totem = model.getTotemPtr()
-	if totem is None:
-		totem = model.addTotemPtr()
-
-	try:
-		token = form['token'].strip()
-		if not token:
-			raise KeyError, 'token'
-		token = int(token)
-		if token < 1:
-			raise ValueError, '%d is an invalid value for token timeout' % token
-		totem.addAttribute('token', str(token))
-	except KeyError, e:
-		try:
-			totem.removeAttribute('token')
-		except:
-			pass
-	except Exception, e:
-		errors.append(str(e))
-
-	try:
-		trblc = form['token_retransmits_before_loss_const'].strip()
-		if not trblc:
-			raise KeyError, 'token_retransmits_before_loss_const'
-		trblc = int(trblc)
-		if trblc < 1:
-			raise ValueError, '%d is an invalid value for number of token retransmits before loss' % trblc
-		totem.addAttribute('token_retransmits_before_loss_const', str(trblc))
-	except KeyError, e:
-		try:
-			totem.removeAttribute('token_retransmits_before_loss_const')
-		except:
-			pass
-	except Exception, e:
-		errors.append(str(e))
-
-	try:
-		join = form['join'].strip()
-		if not join:
-			raise KeyError, 'join'
-		join = int(join)
-		if join < 1:
-			raise ValueError, '%d is an invalid value for join timeout' % join
-		totem.addAttribute('join', str(join))
-	except KeyError, e:
-		try:
-			totem.removeAttribute('join')
-		except:
-			pass
-	except Exception, e:
-		errors.append(str(e))
-
-	try:
-		consensus = form['consensus'].strip()
-		if not consensus:
-			raise KeyError, 'consensus'
-		consensus = int(consensus)
-		if consensus < 1:
-			raise ValueError, '%d is an invalid value for consensus timeout' % consensus
-		totem.addAttribute('consensus', str(consensus))
-	except KeyError, e:
-		try:
-			totem.removeAttribute('consensus')
-		except:
-			pass
-	except Exception, e:
-		errors.append(str(e))
-
-	if len(errors) > 0:
-		return (False, {'errors': errors})
-	return (True, {})
-
-def validateFenceConfig(model, form):
-	errors = list()
-
-	if model.getGULMPtr() is not None:
-		return (False, {'errors': [ 'GULM clusters do not support fenced' ]})
-
-	try:
-		post_fail_delay = int(form['post_fail_delay'])
-		if post_fail_delay < 0:
-			raise ValueError('post fail delay values must be 0 or greater')
-	except KeyError, e:
-		errors.append('No post fail delay was given')
-	except ValueError, e:
-		errors.append('Invalid post fail delay: %s' % str(e))
-
-	try:
-		post_join_delay = int(form['post_join_delay'])
-		if post_join_delay < 0:
-			raise ValueError('post join delay values must be 0 or greater')
-	except KeyError, e:
-		errors.append('No post join delay was given')
-	except ValueError, e:
-		errors.append('Invalid post join delay: %s' % str(e))
-
-	run_xvmd = False
-	try:
-		run_xvmd = form.has_key('run_xvmd')
-	except:
-		pass
-
-	if run_xvmd is True and not model.hasFenceXVM():
-		fenceXVMd = FenceXVMd()
-		model.addFenceXVM(fenceXVMd)
-	elif not run_xvmd:
-		model.delFenceXVM()
-
-	try:
-		fd = model.getFenceDaemonPtr()
-		old_pj_delay = fd.getPostJoinDelay()
-		old_pf_delay = fd.getPostFailDelay()
-
-		if post_join_delay == old_pj_delay and post_fail_delay == old_pf_delay:
-			errors.append('No fence daemon properties were changed')
-		else:
-			fd.setPostJoinDelay(str(post_join_delay))
-			fd.setPostFailDelay(str(post_fail_delay))
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('Unable to update fence daemon properties: %r %s' % (e, str(e)))
-		errors.append('An error occurred while attempting to update fence daemon properties')
-
-	if len(errors) > 0:
-		return (False, {'errors': errors })
-
-	return (True, {})
-
-def validateGULMConfig(model, form):
-	gulm_ptr = model.getGULMPtr()
-	if not gulm_ptr:
-		return (False, {'errors': [ 'This cluster appears not to be using GULM locking' ]})
-
-	node_list = map(lambda x: x.getName(), gulm_ptr.getChildren())
-	for i in model.getNodeNames():
-		if not i in node_list:
-			node_list.append(i)
-
-	gulm_lockservers = list()
-	for node in node_list:
-		if form.has_key(node) and form[node] == 'on':
-			ls = Lockserver()
-			ls.addAttribute('name', node)
-			gulm_lockservers.append(ls)
-
-	try:
-		xlockservers = filter(lambda x: x.strip(), form['__GULM__'])
-	except:
-		xlockservers = list()
-
-	for i in xlockservers:
-		if not i in node_list:
-			ls = Lockserver()
-			ls.addAttribute('name', i)
-			gulm_lockservers.append(ls)
-
-	num_ls = len(gulm_lockservers)
-	if not num_ls in (1, 3, 5):
-		return (False, {'errors': [ 'You must have exactly 1, 3, or 5 GULM lock servers. You submitted %d lock servers' % num_ls ]})
-
-	model.GULM_ptr.children = gulm_lockservers
-	return (True, {})
-
-configFormValidators = {
-	'general': validateGeneralConfig,
-	'mcast': validateMCastConfig,
-	'fence': validateFenceConfig,
-	'qdisk': validateQDiskConfig,
-	'gulm': validateGULMConfig
-}
-
-def validateConfigCluster(self, request):
-	errors = list()
-	messages = list()
-	fvar = GetReqVars(request, [ 'configtype', 'clustername', 'URL' ])
-
-	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-	clustername = fvar['clustername']
+	clustername = fvar['clustername']
+	if clustername is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE0: No cluster name')
+		return (False, {'errors': ['No cluster name was given']})
 
 	model = LuciExtractCluModel(self, request, clustername)
 	if model is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VCC0: no model')
-		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
-
-	if clustername is None:
-		clustername = model.getClusterName()
-
-	config_type = fvar['configtype']
-	if config_type is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VCC1: no config type')
-		return (False, {'errors': [ 'No configuration type was given' ]})
-
-	if not configFormValidators.has_key(config_type):
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VCC2: invalid config type: %s' \
-				% config_type)
-		return (False, { 'errors': [ 'An invalid configuration type "%s" was submitted' % config_type ]})
-
-	config_validator = configFormValidators[config_type]
-	ret = config_validator(model, request.form)
-
-	retcode = ret[0]
-	if ret[1].has_key('errors'):
-		errors.extend(ret[1]['errors'])
-	if ret[1].has_key('messages'):
-		messages.extend(ret[1]['messages'])
-
-	if retcode is not True or len(errors) > 0:
-		return (False, {'errors': errors, 'messages': messages})
-
-	ret = propagateClusterConfAsync(self, model, None,
-			CLUSTER_CONFIG, 'Updating cluster configuration')
-	if ret[0] is not True:
-		if ret[1].has_key('errors'):
-			errors.extend(ret[1]['errors'])
-		return (retcode, {'errors': errors, 'messages': messages})
-
-	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (baseurl, CLUSTER_CONFIG, clustername))
-
-def validateFenceAdd(self, request):
-	errors = list()
-	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
-
-	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-	clustername = fvar['clustername']
-
-	model = LuciExtractCluModel(self, request, clustername)
-	if model is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VFA0: no model')
-		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
-
-	ret_code, ret_obj = validateNewFenceDevice(request.form, model)
-	if ret_code != FD_VAL_SUCCESS:
-		errors.extend(ret_obj)
-		return (False, { 'errors': errors })
-
-	ret = propagateClusterConfAsync(self, model, None,
-			CLUSTER_CONFIG, 'Creating fence device "%s"' % ret_obj)
-	if ret[0] is not True:
-		return ret
-
-	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, ret_obj))
-
-def validateFenceEdit(self, request):
-	errors = list()
-
-	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
-	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-
-	clustername = fvar['clustername']
-	if clustername is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VFE0: No cluster name')
-		return (False, {'errors': ['No cluster name was given']})
-
-	model = LuciExtractCluModel(self, request, clustername)
-	if model is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VFE1: no model')
+			luci_log.debug_verbose('vNFC6: no model for %s' % clustername)
 		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
 
-	# This is a fence edit situation, so the model should already have an
-	# entry for this fence device.
-	#
-	# pass form and model to validation method, then save changes if it passes.
-	error_code, retobj = validateFenceDevice(request.form, model)
-	if error_code != FD_VAL_SUCCESS:
-		errors.extend(retobj)
-		return (False, { 'errors': errors })
-
-	ret = propagateClusterConfAsync(self, model, None,
-			CLUSTER_CONFIG, 'Updating fence device "%s"' % retobj)
+	ret = validate_node_fence_config(model, request)
 	if ret[0] is not True:
 		return ret
 
-	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, retobj))
-
-def validateNodeFenceConfig(self, request):
-	errors = list()
-	fvar = GetReqVars(request,
-			[ 'fence_xml', 'fence_level', 'nodename', 'clustername', 'URL' ])
-
-	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-
-	if fvar['fence_xml'] is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC0: no fence_xml for node %s' \
-				% fvar['nodename'])
-		return (False, {'errors': ['No fence data was supplied']})
-
-	if fvar['fence_level'] is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC1: no fence level for %s' \
-				% fvar['nodename'])
-		return (False, {'errors': ['No fence level was supplied']})
-
-	try:
-		fence_level = int(fvar['fence_level'])
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC2: invalid fence level: %s: %r %s' \
-				% (fvar['fence_level'], e, str(e)))
-		return (False, {'errors': ['"%s" is an invalid fence level' % fvar['fence_level'] ]})
-
-	nodename = fvar['nodename']
-	if nodename is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC3: no nodename: %r %s' % (e, str(e)))
-		return (False, {'errors': ['No node name was given']})
-
-	clustername = fvar['clustername']
-	if clustername is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC4: no clustername: %r %s' % (e, str(e)))
-		return (False, {'errors': ['No cluster name was given']})
-
-	model = LuciExtractCluModel(self, request, clustername)
-	if model is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC6: no model for %s' % clustername)
-		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
-
-	try:
-		doc = minidom.parseString(fvar['fence_xml'])
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC7: error: %r %s' % (e, str(e)))
-		return (False, {'errors': ['The fence data submitted is not properly formed']})
-
-	try:
-		node = model.retrieveNodeByName(nodename)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC8: unable to find node name %s in current node list: %r %s' % (nodename, e, str(e)))
-		return (False, {'errors': ['Unable to find the cluster node %s in the node list' % nodename ]})
-
-	levels = node.getFenceLevels()
-	try:
-		method_id = levels[fence_level - 1].getAttribute('name')
-		if not method_id:
-			raise Exception, 'No method ID'
-		fence_method = Method()
-		fence_method.addAttribute('name', str(method_id))
-		levels[fence_level - 1] = fence_method
-	except Exception, e:
-		method_id = fence_level
-		fence_method = Method()
-		fence_method.addAttribute('name', str(method_id))
-
-	forms = doc.getElementsByTagName('form')
-	if len(forms) < 1:
-		delete_target = None
-		for l in levels:
-			# delete the fence level
-			if l.getAttribute('name') == method_id:
-				delete_target = l
-				break
-		if delete_target is not None:
-			try:
-				node.getChildren()[0].removeChild(delete_target)
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC9: %s: %r %s' \
-						% (method_id, e, str(e)))
-				return (False, {'errors': ['An error occurred while deleting fence method %s' % method_id ]})
-		else:
-			return (True, {'messages': ['No changes were made'] })
-
-	form_hash = {}
-	for i in forms:
-		form_id = i.getAttribute('id')
-		if not form_id:
-			continue
-		ielems = i.getElementsByTagName('input')
-		if not ielems or len(ielems) < 1:
-			continue
-
-		dummy_form = {}
-
-		for i in ielems:
-			try:
-				input_type = str(i.getAttribute('type'))
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC10: input type: %r %s' \
-						% (e, str(e)))
-				continue
-
-			if not input_type or input_type == 'button':
-				continue
-
-			try:
-				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC11: parsing XML: %r %s' \
-						% (e, str(e)))
-
-		if len(dummy_form) < 1:
-			continue
-
-		if dummy_form.has_key('fence_instance'):
-			try:
-				parent = dummy_form['parent_fencedev']
-			except:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC12: no parent for instance')
-				return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
-
-			try:
-				form_hash[parent][1].append(dummy_form)
-				del dummy_form['fence_instance']
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC13: no parent for instance')
-				return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
-		else:
-			form_hash[form_id] = (dummy_form, list())
-
-	fh_keys = form_hash.keys()
-	fh_keys.sort()
-	for i in fh_keys:
-		fencedev_name = None
-		fencedev_unknown = False
-
-		try:
-			fence_form, instance_list = form_hash[i]
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vNFC14: %r %s' % (e, str(e)))
-			continue
-
-		try:
-			fence_type = fence_form['fence_type']
-			if not fence_type:
-				raise Exception, 'fence type is blank'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vNFC15: %s: %r %s' % (i, e, str(e)))
-			fence_type = None
-
-		if fence_form.has_key('existing_device'):
-			try:
-				fencedev_name = fence_form['name']
-				if not fencedev_name.strip():
-					raise Exception, 'no fence name'
-			except Exception, e:
-				errors.append('You must provide a unique name for all fence devices')
-				continue
-
-			if fence_type is None:
-				# An unknown fence device agent. Pull the data out of
-				# the model and persist it and all instances.
-				# All we care about is its name.
-				fencedev_unknown = True
-			else:
-				if not fence_form.has_key('sharable'):
-					# If it's a shared fence device that already exists, the
-					# user could not have edited it (without playing dirty
-					# games), so it's safe to pull the existing entry from
-					# the model. All we need is the device name, and nothing
-					# else needs to be done here.
-					#
-					# For an existing non-shared device update the device
-					# in the model, since the user could have edited it.
-					retcode, retmsg = validateFenceDevice(fence_form, model)
-					if retcode != FD_VAL_SUCCESS:
-						errors.extend(retmsg)
-						continue
-					else:
-						fencedev_name = retmsg
-
-					# Add back the tags under the method block
-					# for the fence instance
-					if type == 'fence_manual':
-						instance_list.append({'name': fencedev_name, 'nodename': nodename })
-					else:
-						instance_list.append({'name': fencedev_name })
-		else:
-			# The user created a new fence device.
-			retcode, retmsg = validateNewFenceDevice(fence_form, model)
-			if retcode != FD_VAL_SUCCESS:
-				errors.extend(retmsg)
-				continue
-			else:
-				fencedev_name = retmsg
-
-			# If it's not shared, we need to create an instance form
-			# so the appropriate XML goes into the <method> block inside
-			# <node><fence>. All we need for that is the device name.
-			if not fence_form.has_key('sharable'):
-				if type == 'fence_manual':
-					instance_list.append({'name': fencedev_name, 'nodename': nodename })
-				else:
-					instance_list.append({'name': fencedev_name })
-
-		if fencedev_unknown is True:
-			# Save any instances for this fence device.
-			# XXX FIX ME - instances must be saved.
-			pass
-
-		for inst in instance_list:
-			retcode, retobj = validate_fenceinstance(inst, fencedev_name, fence_type)
-			if retcode != FD_VAL_SUCCESS:
-				errors.extend(retobj)
-				continue
-			fence_method.addChild(retobj)
-
-		if len(node.getChildren()) > 0:
-			# There's already a <fence> block
-			found_target = False
-			for idx in xrange(len(levels)):
-				if levels[idx].getAttribute('name') == method_id:
-					found_target = True
-					break
-
-			if found_target is False:
-				# There's a fence block, but no relevant method
-				# block
-				node.getChildren()[0].addChild(fence_method)
-		else:
-			# There is no <fence> tag under the node yet.
-			fence_node = Fence()
-			fence_node.addChild(fence_method)
-			node.addChild(fence_node)
-
-	if len(errors) > 0:
-		return (False, {'errors': errors })
-
 	ret = propagateClusterConfAsync(self, model, None, FENCEDEV_NODE_CONFIG,
-			'Updating fence configuration for node "%s"' % fvar['nodename'])
+			'Updating fence configuration for node "%s"' % nodename)
 	if ret[0] is not True:
 		return ret
 
 	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&nodename=%s&busyfirst=true' % (baseurl, NODE, clustername, nodename))
 
 def deleteFenceDevice(self, request):
+	from LuciValidation import validate_fence_del
 	errors = list()
 
-	fvar = GetReqVars(request,
-			[ 'orig_name', 'nodename', 'clustername', 'URL' ])
-
+	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
 	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
-	nodename = fvar['nodename']
-	if nodename is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('DFD0: no node name')
-		return (False, {'errors': ['No node name was given']})
-
 	clustername = fvar['clustername']
 	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
@@ -1797,28 +923,12 @@
 			luci_log.debug_verbose('DFD2: no model')
 		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
 
-	fencedev_name = fvar['orig_name']
-	if fencedev_name is None:
-		return (False, {'errors': ['No fence device name in form submission']})
-
-	try:
-		fdev = model.getFenceDeviceByName(fencedev_name)
-		if fdev:
-			if model.deleteFenceDevice(fdev) is not True:
-				raise Exception, 'failed to remove %s' % fdev.getName()
-			model.removeFenceInstancesForFenceDevice(fencedev_name)
-		else:
-			raise Exception, 'no fence device named "%s" was found' \
-					% fencedev_name
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('DFD3: %s: %r %s' \
-				% (fencedev_name, e, str(e)))
-		return (False, { 'errors': [ 'Error removing fence device %s: %s' \
-										% (fencedev_name, str(e)) ]})
+	ret = validate_fence_del(model, request)
+	if ret[0] is not True:
+		return ret
 
 	ret = propagateClusterConfAsync(self, model, None, CLUSTER_CONFIG,
-			'Removing fence device "%s"' % fencedev_name)
+			'Removing fence device "%s"' % ret[1].get('name'))
 	if ret[0] is not True:
 		return ret
 
@@ -1826,47 +936,22 @@
 		% (baseurl, FENCEDEVS, clustername))
 
 def validateDaemonProperties(self, request):
-	errors = list()
+	from LuciValidation import validate_cluster_daemon_form
 
 	fvar = GetReqVars(request,
-			[ 'orig_name', 'nodename', 'clustername', 'URL' ])
-
+		[ 'nodename', 'clustername', 'URL' ])
 	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-
 	clustername = fvar['clustername']
-	if clustername is None:
-		errors.append('Unable to determine the current cluster name')
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDP2: no clustername')
-
 	nodename = fvar['nodename']
-	if nodename is None:
-		errors.append('Unable to determine the current node name')
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDP1: no nodename for %s' % clustername)
 
-	disable_list = list()
-	enable_list = list()
-	for i in request.form.items():
-		try:
-			if i[0][:11] == '__daemon__:':
-				daemon_prop = i[1]
-				if len(daemon_prop) == 2:
-					if daemon_prop[1] == '1':
-						disable_list.append(daemon_prop[0])
-				else:
-					if daemon_prop[1] == '0' and daemon_prop[2] == 'on':
-						enable_list.append(daemon_prop[0])
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VDP3: error: %s: %r %s' \
-					% (str(i), e, str(e)))
+	ret = validate_cluster_daemon_form(self, request)
+	if ret[0] is not True:
+		return ret
 
-	if len(enable_list) < 1 and len(disable_list) < 1:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDP4: no changes made')
-		request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&nodename=%s' \
-			% (baseurl, NODE, clustername, nodename))
+	enable_list = ret[1]['enable_list']
+	disable_list = ret[1]['disable_list']
+
+	errors = list()
 
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
 	try:
@@ -1878,14 +963,14 @@
 			luci_log.debug_verbose('VDP5: RC %s: %r %s' \
 				% (nodename_resolved, e, str(e)))
 		errors.append('Unable to connect to the ricci agent on %s to update cluster daemon properties' % nodename_resolved)
-		return (False, {'errors': errors})
+		return (False, { 'errors': errors})
 
 	batch_id, result = rq.updateServices(rc, enable_list, disable_list)
 	if batch_id is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('VDP6: setCluserConf: batchid or result is None')
 		errors.append('Unable to update the cluster daemon properties on node %s' % nodename_resolved)
-		return (False, {'errors': errors})
+		return (False, { 'errors': errors})
 
 	try:
 		if len(enable_list) > 0:
@@ -1906,13 +991,17 @@
 	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&nodename=%s&busyfirst=true' % (baseurl, NODE, clustername, nodename))
 
 def validateFdom(self, request):
-	errors = list()
+	from LuciValidation import validate_fdom
+
 	fvar = GetReqVars(request, [ 'clustername', 'name', 'oldname', 'URL' ])
 	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
+	name = fvar['name']
 	clustername = fvar['clustername']
 	if clustername is None:
-		errors.append('Unable to determine this cluster\'s name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE0: No cluster name')
+		return (False, {'errors': ['No cluster name was given']})
 
 	model = LuciExtractCluModel(self, request, clustername)
 	if model is None:
@@ -1920,111 +1009,21 @@
 			luci_log.debug_verbose('validateFdom0: no model')
 		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
-	name = fvar['name']
-	if name is None:
-		errors.append('No name was given for this failover domain')
-
-	prioritized = False
-	try:
-		prioritized = request.form.has_key('prioritized')
-	except:
-		prioritized = False
-
-	restricted = False
-	try:
-		restricted = request.form.has_key('restricted')
-	except:
-		restricted = False
-
-	nofailback = False
-	try:
-		nofailback = request.form.has_key('nofailback')
-	except:
-		nofailback = False
-
-	oldname = fvar['oldname']
-
-	if oldname is None or oldname != name:
-		if model.getFailoverDomainByName(name) is not None:
-			errors.append('A failover domain named "%s" already exists' % name)
-
-	fdom = None
-	if oldname is not None:
-		fdom = model.getFailoverDomainByName(oldname)
-		if fdom is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('validateFdom1: No fdom named %s exists' % oldname)
-			errors.append('No failover domain named "%s" exists' % oldname)
-		else:
-			fdom.addAttribute('name', name)
-			fdom.children = list()
-	else:
-		fdom = FailoverDomain()
-		fdom.addAttribute('name', name)
-
-	if fdom is None or len(errors) > 0:
-		return (False, {'errors': errors })
-
-	if prioritized:
-		fdom.addAttribute('ordered', '1')
-	else:
-		fdom.addAttribute('ordered', '0')
-
-	if restricted:
-		fdom.addAttribute('restricted', '1')
-	else:
-		fdom.addAttribute('restricted', '0')
-
-	if nofailback:
-		fdom.addAttribute('nofailback', '1')
-	else:
-		fdom.addAttribute('nofailback', '0')
-
-	for i in model.getNodeNames():
-		if request.form.has_key(i):
-			fdn = FailoverDomainNode()
-			fdn.addAttribute('name', i)
-			if prioritized:
-				priority = 1
-				try:
-					priority = int(request.form['__PRIORITY__%s' % i].strip())
-					if priority < 1:
-						priority = 1
-				except Exception, e:
-					priority = 1
-				fdn.addAttribute('priority', str(priority))
-			fdom.addChild(fdn)
-
-	try:
-		fdom_ptr = model.getFailoverDomainPtr()
-		if not oldname:
-			fdom_ptr.addChild(fdom)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateFdom2: %r %s' % (e, str(e)))
-		errors.append('Unable to update the cluster configuration')
-
-	if len(errors) > 0:
-		return (False, {'errors': errors })
-
-	if oldname:
-		action = FDOM
-		status_msg = 'Updating failover domain "%s"' % oldname
-	else:
-		action = FDOM_ADD
-		status_msg = 'Creating failover domain "%s"' % name
+	ret = validate_fdom(self, request)
+	if ret[0] is not True:
+		return ret
 
-	ret = propagateClusterConfAsync(self, model, None, action, status_msg)
+	ret = propagateClusterConfAsync(self, model, None,
+			ret[1]['action'], ret[1]['msg'])
 	if ret[0] is not True:
 		return ret
 
 	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fdomname=%s&busyfirst=true' % (baseurl, FDOM, clustername, name))
 
 def validateVM(self, request):
-	errors = list()
-
-	fvar = GetReqVars(request, [ 'clustername', 'vmname', 'oldname', 'vmpath', 'recovery', 'domain', 'URL' ])
+	from LuciValidation import validate_vmsvc_form
 
+	fvar = GetReqVars(request, [ 'clustername', 'URL' ])
 	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
 	clustername = fvar['clustername']
@@ -2039,101 +1038,14 @@
 			luci_log.debug_verbose('validateVM1: no model')
 		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
-	vm_name = fvar['vmname']
-	if vm_name is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateVM2: no vm name')
-		errors.append('No virtual machine name was given')
-
-	vm_path = fvar['vmpath']
-	if vm_path is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateVM3: no vm path')
-		errors.append('No path to the virtual machine configuration directory was given')
-
-	autostart = 1
-	if request.form.has_key('autostart'):
-		autostart = 1
-	else:
-		autostart = 0
-
-	exclusive = 0
-	if request.form.has_key('exclusive'):
-		exclusive = 1
-	else:
-		exclusive = 0
-
-	recovery = fvar['recovery']
-	if recovery is not None and recovery != 'restart' and recovery != 'relocate' and recovery != 'disable':
-		errors.append('You entered an invalid recovery option: "%s" Valid options are "restart" "relocate" and "disable"')
-
-	fdom = fvar['domain']
-
-	if len(errors) > 0:
-		return (False, {'errors': errors })
-
-	isNew = False
-	old_name = fvar['oldname']
-	if old_name is None:
-		isNew = True
-
-	delete_vm = False
-	if request.form.has_key('delete'):
-		try:
-			xvm = model.retrieveVMsByName(old_name)
-			if not xvm:
-				raise Exception, 'not found'
-			rmptr = model.getResourceManagerPtr()
-			rmptr.removeChild(xvm)
-			delete_vm = True
-		except:
-			return (False, {'errors': ['No virtual machine service named "%s" exists' % old_name ]})
-	else:
-		if isNew is True:
-			xvm = Vm()
-			xvm.addAttribute('name', vm_name)
-			xvm.addAttribute('path', vm_path)
-			rmptr = model.getResourceManagerPtr()
-			rmptr.addChild(xvm)
-		else:
-			try:
-				xvm = model.retrieveVMsByName(old_name)
-				if not xvm:
-					raise Exception, 'not found'
-			except:
-				return (False, {'errors': ['No virtual machine service named "%s" exists' % old_name ]})
-			xvm.addAttribute('name', vm_name)
-			xvm.addAttribute('path', vm_path)
-
-	xvm.addAttribute('autostart', str(autostart))
-	xvm.addAttribute('exclusive', str(exclusive))
-	if fdom:
-		xvm.addAttribute('domain', fdom)
-	else:
-		try:
-			xvm.removeAttribute('domain')
-		except:
-			pass
-
-	if recovery:
-		xvm.addAttribute('recovery', recovery)
-	else:
-		try:
-			xvm.removeAttribute('recovery')
-		except:
-			pass
+	ret = validate_vmsvc_form(model, request)
+	if ret[0] is not True:
+		return ret
 
-	if delete_vm is True:
-		action = VM_CONFIG
-		status_msg = 'Deleting virtual machine service "%s"' % vm_name
-	elif isNew is True:
-		action = VM_ADD
-		status_msg = 'Creating virtual machine service "%s"' % vm_name
-	else:
-		action = VM_CONFIG
-		status_msg = 'Configuring virtual machine service "%s"' % vm_name
+	action_type = ret[1]['action_type']
+	action_msg = ret[1]['action_msg']
 
-	ret = propagateClusterConfAsync(self, model, None, action, status_msg)
+	ret = propagateClusterConfAsync(self, model, None, action_type, action_msg)
 	if ret[0] is not True:
 		return ret
 
@@ -2204,7 +1116,7 @@
 	cc = None
 	if req.has_key('new_cluster_conf'):
 		cc = req['new_cluster_conf']
-		msg_list.append('Checking if valid XML - ')
+		msg_list.append('Checking XML validity - ')
 		cc_xml = None
 		try:
 			cc_xml = minidom.parseString(cc)
@@ -2269,7 +1181,8 @@
 	57: deleteFenceDevice,
 	58: validateNodeFenceConfig,
 	60: validate_xvm_key_dist,
-	80: process_cluster_conf_editor
+	80: process_cluster_conf_editor,
+	1001: validate_clusvc_async 
 }
 
 def validatePost(self, request):
@@ -2278,41 +1191,15 @@
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('VP0: error: %r %s' % (e, str(e)))
-		return None
+		return (False, {})
 
 	if not pagetype in formValidators:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('VP1: no handler for page type %d' % pagetype)
-		return None
+		return (False, {})
 	else:
 		return formValidators[pagetype](self, request)
 
-def getClusterURL(self, request, model):
-	try:
-		clustername = request['clustername'].strip()
-		if not clustername:
-			raise Exception, 'cluster name from request is blank'
-	except:
-		try:
-			clustername = model.getClusterName()
-			if not clustername:
-				raise Exception, 'cluster name from model is blank'
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GCURL0: unable to get cluster name')
-			return ''
-
-	return '/luci/cluster/index_html?pagetype=7&clustername=%s' % clustername
-
-def getRicciAgentForCluster(self, req):
-	fvar = GetReqVars(req, [ 'clustername' ])
-	clustername = fvar['clustername']
-	if clustername is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('GRAFC0: no cluster name was found')
-		return None
-	return getRicciAgent(self, clustername)
-
 def clusterTaskProcess(self, model, request):
 	fvar = GetReqVars(request, [ 'task', 'clustername', 'URL' ])
 
@@ -2366,10 +1253,10 @@
 	fvar = GetReqVars(request, [ 'task', 'clustername', 'nodename', 'URL' ])
 
 	task = fvar['task']
-	clustername = fvar['clustername']
 	nodename = fvar['nodename']
 	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
+	clustername = fvar['clustername']
 	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug('NTP0: missing cluster name')
@@ -2435,67 +1322,201 @@
 				# we'll hit it again, and try again then
 				pass
 
-		if rc is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP7: node %s is not authenticated' \
-					% nodename_resolved)
-			return (False, { 'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
+		if rc is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP7: node %s is not authenticated' \
+					% nodename_resolved)
+			return (False, { 'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
+
+	if task == NODE_LEAVE_CLUSTER:
+		from LuciClusterActions import NodeLeaveCluster
+		if NodeLeaveCluster(self, rc, clustername, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP8: nodeLeave failed')
+			return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
+
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, NODES, clustername))
+	elif task == NODE_JOIN_CLUSTER:
+		from LuciClusterActions import NodeJoinCluster
+		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP9: nodeJoin failed')
+			return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
+
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, NODES, clustername))
+	elif task == NODE_REBOOT:
+		from LuciClusterActions import NodeReboot
+		if NodeReboot(self, rc, clustername, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP10: nodeReboot failed')
+			return (False, {'errors': [ 'Node "%s" failed to reboot' \
+				% nodename_resolved ]})
+
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, NODES, clustername))
+	elif task == NODE_FENCE:
+		from LuciClusterActions import NodeFence
+		if NodeFence(self, clustername, nodename, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP11: nodeFence failed')
+			return (False, {'errors': [ 'Fencing of node "%s" failed' \
+				% nodename_resolved]})
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, NODES, clustername))
+	elif task == NODE_DELETE:
+		from LuciClusterActions import NodeDeleteFromCluster
+		if NodeDeleteFromCluster(self, rc, model, clustername, nodename, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP12: nodeDelete failed')
+			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, NODES, clustername))
+	elif task == NODE_FORCE_DELETE:
+		from LuciClusterActions import NodeForceDeleteFromCluster
+		if NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP13: nodeForceDelete failed')
+			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, NODES, clustername))
+
+def getResourceInfo(model, request):
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRI0: no model object in session')
+		return {}
+
+	fvars = GetReqVars(request,
+				[ 'resourcename', 'type', 'value', 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	name = fvars['resourcename']
+	if name is None:
+		if fvars['type'] == 'ip':
+			name = fvars['value']
+
+	if name is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRI1: missing res name')
+		return {}
+
+	from LuciClusterInfo import getResourceInfo as gri
+	return gri(model, name, baseurl, res=None)
+
+def serviceRestart(self, rc, req):
+	from LuciClusterActions import RestartCluSvc
+
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	ret = RestartCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def serviceStop(self, rc, req):
+	from LuciClusterActions import StopCluSvc
+
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	ret = StopCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def serviceStart(self, rc, req):
+	from LuciClusterActions import StartCluSvc
+
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	ret = StartCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def serviceDelete(self, rc, req):
+	from LuciClusterActions import DeleteCluSvc
+
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername']
 
-	if task == NODE_LEAVE_CLUSTER:
-		from LuciClusterActions import NodeLeaveCluster
-		if NodeLeaveCluster(self, rc, clustername, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('NTP8: nodeLeave failed')
-			return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
+	model = LuciExtractCluModel(self, req, clustername)
+	if model is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceDelete0: no model')
+		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
+	ret = DeleteCluSvc(self, rc, fvars, model)
+	if ret is None:
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, NODES, clustername))
-	elif task == NODE_JOIN_CLUSTER:
-		from LuciClusterActions import NodeJoinCluster
-		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('NTP9: nodeJoin failed')
-			return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
+			% (baseurl, SERVICES, clustername))
+	else:
+		return ret
 
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, NODES, clustername))
-	elif task == NODE_REBOOT:
-		from LuciClusterActions import NodeReboot
-		if NodeReboot(self, rc, clustername, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('NTP10: nodeReboot failed')
-			return (False, {'errors': [ 'Node "%s" failed to reboot' \
-				% nodename_resolved ]})
+def serviceMigrate(self, rc, req):
+	from LuciClusterActions import MigrateCluSvc
 
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, NODES, clustername))
-	elif task == NODE_FENCE:
-		from LuciClusterActions import NodeFence
-		if NodeFence(self, clustername, nodename, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('NTP11: nodeFencefailed')
-			return (False, {'errors': [ 'Fencing of node "%s" failed' \
-				% nodename_resolved]})
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, NODES, clustername))
-	elif task == NODE_DELETE:
-		from LuciClusterActions import NodeDeleteFromCluster
-		if NodeDeleteFromCluster(self, rc, model, clustername, nodename, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('NTP12: nodeDelete failed')
-			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
 
+	ret = MigrateCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, NODES, clustername))
-	elif task == NODE_FORCE_DELETE:
-		from LuciClusterActions import NodeForceDeleteFromCluster
-		if NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('NTP13: nodeForceDelete failed')
-			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def resourceDelete(self, rc, req):
+	from LuciClusterActions import DeleteResource
+
+	fvars = GetReqVars(req,
+		[ 'clustername', 'resourcename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername']
+
+	model = LuciExtractCluModel(self, req, clustername)
+	if model is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('resourceDelete0: no model')
+		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
 
+	ret = DeleteResource(self, rc, model, fvars['resourcename'])
+	if ret is None:
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, NODES, clustername))
+			% (baseurl, RESOURCES, clustername))
+	else:
+		return ret
+
+def getSystemLogs(self, req):
+	from LuciClusterActions import GetSystemLogs
+
+	fvars = GetReqVars(req, [ 'clustername', 'nodename' ])
+	return GetSystemLogs(self, fvars)
 
 def isClusterBusy(self, req):
 	items = None
@@ -2832,166 +1853,3 @@
 	if LUCI_DEBUG_MODE is True:
 		luci_log.debug_verbose('ICB26: returning busy_map: %s' % str(busy_map))
 	return busy_map
-
-# These are called from external methods.
-def getResourceInfo(model, request):
-	if not model:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRI0: no model object in session')
-		return {}
-
-	fvars = GetReqVars(request,
-				[ 'resourcename', 'type', 'value', 'clustername', 'URL' ])
-
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-
-	name = fvars['resourcename']
-	if name is None:
-		if fvars['type'] == 'ip':
-			name = fvars['value']
-
-	if name is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRI1: missing res name')
-		return {}
-
-	from LuciClusterInfo import getResourceInfo as gri
-	return gri(model, name, baseurl, res=None)
-
-def serviceRestart(self, rc, req):
-	from LuciClusterActions import RestartCluSvc
-
-	fvars = GetReqVars(req,
-				[ 'clustername', 'servicename', 'nodename', 'URL' ])
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-
-	ret = RestartCluSvc(self, rc, fvars)
-	if ret is None:
-		response = req.RESPONSE
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, SERVICE_LIST, fvars['clustername']))
-	else:
-		return ret
-
-def serviceStop(self, rc, req):
-	from LuciClusterActions import StopCluSvc
-
-	fvars = GetReqVars(req,
-				[ 'clustername', 'servicename', 'nodename', 'URL' ])
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-
-	ret = StopCluSvc(self, rc, fvars)
-	if ret is None:
-		response = req.RESPONSE
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, SERVICE_LIST, fvars['clustername']))
-	else:
-		return ret
-
-def serviceStart(self, rc, req):
-	from LuciClusterActions import StartCluSvc
-
-	fvars = GetReqVars(req,
-				[ 'clustername', 'servicename', 'nodename', 'URL' ])
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-
-	ret = StartCluSvc(self, rc, fvars)
-	if ret is None:
-		response = req.RESPONSE
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, SERVICE_LIST, fvars['clustername']))
-	else:
-		return ret
-
-def serviceDelete(self, rc, req):
-	from LuciClusterActions import DeleteCluSvc
-
-	fvars = GetReqVars(req,
-				[ 'clustername', 'servicename', 'nodename', 'URL' ])
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-	clustername = fvars['clustername']
-
-	model = LuciExtractCluModel(self, req, clustername)
-	if model is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceDelete0: no model')
-		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
-
-	ret = DeleteCluSvc(self, rc, fvars, model)
-	if ret is None:
-		response = req.RESPONSE
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, SERVICES, clustername))
-	else:
-		return ret
-
-def serviceMigrate(self, rc, req):
-	from LuciClusterActions import MigrateCluSvc
-
-	fvars = GetReqVars(req,
-				[ 'clustername', 'servicename', 'nodename', 'URL' ])
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-
-	ret = MigrateCluSvc(self, rc, fvars)
-	if ret is None:
-		response = req.RESPONSE
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, SERVICE_LIST, fvars['clustername']))
-	else:
-		return ret
-
-def resourceDelete(self, rc, req):
-	from LuciClusterActions import DeleteResource
-
-	fvars = GetReqVars(req,
-		[ 'clustername', 'resourcename', 'nodename', 'URL' ])
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-	clustername = fvars['clustername']
-
-	model = LuciExtractCluModel(self, req, clustername)
-	if model is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('resourceDelete0: no model')
-		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
-
-	ret = DeleteResource(self, rc, model, fvars['resourcename'])
-	if ret is None:
-		response = req.RESPONSE
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, RESOURCES, clustername))
-	else:
-		return ret
-
-def resourceAdd(self, req, model, res):
-	from LuciClusterActions import AddResource, EditResource
-	fvars = GetReqVars(req, [ 'URL' ])
-	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-
-	try:
-		cluname = model.getClusterName()
-		rc = getRicciAgent(self, cluname)
-		if rc is None:
-			raise Exception, 'no rc'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('resourceAdd0: no ricci agent: %r %s' \
-				% (e, str(e)))
-		return (False, { 'errors': [ 'Unable to find a ricci agent for cluster "%s"' % cluname ]})
-
-	if req.form.has_key('edit'):
-		ret = EditResource(self, rc, model, res)
-	else:
-		ret = AddResource(self, rc, model, res)
-
-	if ret is None:
-		response = req.RESPONSE
-		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (baseurl, RESOURCES, cluname))
-	else:
-		return ret
-
-def getSystemLogs(self, req):
-	from LuciClusterActions import GetSystemLogs
-
-	fvars = GetReqVars(req, [ 'clustername', 'nodename' ])
-	return GetSystemLogs(self, fvars)
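
The validatePost() hunk above is the dispatch point for every handler in
this file: an integer page type indexes the formValidators table, and
malformed or unknown types now fall through to (False, {}) instead of
None, so callers always get a (status, dict) pair. A minimal,
self-contained sketch of the pattern (the page-type value and handler
name below are illustrative, not Luci's actual table):

    # Sketch of the page-type dispatch used by validatePost().
    def validate_service_form(self, request):
        return (True, {'messages': ['ok']})

    form_validators = {
        6: validate_service_form,       # illustrative page type
    }

    def validate_post(self, request):
        try:
            pagetype = int(request['pagetype'])
        except (KeyError, TypeError, ValueError):
            return (False, {})          # malformed request
        if pagetype not in form_validators:
            return (False, {})          # no handler for this page type
        return form_validators[pagetype](self, request)

    # validate_post(None, {'pagetype': '6'}) -> (True, {'messages': ['ok']})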



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-08-23 19:00 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-08-23 19:00 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-08-23 19:00:31

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix 253906: Quorum disk page: Error when trying to continue w/o a heuristic (it seems

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.21&r2=1.227.2.22

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/08/22 20:57:26	1.227.2.21
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/08/23 19:00:30	1.227.2.22
@@ -1109,7 +1109,7 @@
 	for i in xrange(num_heuristics):
 		try:
 			h = form['heuristic%d' % i]
-			if not h or len(h) != 3:
+			if not h or len(h) != 3 or not (h[0].strip() and h[1].strip() and h[2].strip()):
 				continue
 		except:
 			continue
@@ -1119,26 +1119,26 @@
 			if not hprog:
 				raise Exception, 'no hprog'
 		except Exception, e:
-			errors.append('No program was given for heuristic %d' % i + 1)
+			errors.append('No program was given for heuristic %d' % (i + 1))
 		try:
 			hint = int(h[1])
 			if hint < 1:
 				raise ValueError, 'Heuristic interval values must be greater than 0'
 		except KeyError, e:
-			errors.append('No interval was given for heuristic %d' % i + 1)
+			errors.append('No interval was given for heuristic %d' % (i + 1))
 		except ValueError, e:
 			errors.append('An invalid interval was given for heuristic %d: %s' \
-				% (i + 1, str(e)))
+				% ((i + 1), str(e)))
 
 		try:
 			hscore = int(h[2])
 			if hscore < 1:
 				raise ValueError, 'Heuristic scores must be greater than 0'
 		except KeyError, e:
-			errors.append('No score was given for heuristic %d' % i + 1)
+			errors.append('No score was given for heuristic %d' % (i + 1))
 		except ValueError, e:
 			errors.append('An invalid score was given for heuristic %d: %s' \
-				% (i + 1, str(e)))
+				% ((i + 1), str(e)))
 
 		heuristics.append([ hprog, hint, hscore ])
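
Every hunk above corrects the same precedence bug: the string-formatting
operator % binds tighter than +, so 'heuristic %d' % i + 1 parses as
('heuristic %d' % i) + 1 and raises TypeError when the int is added to
the formatted string. Reduced to two lines:

    # '%' binds tighter than '+': '%d' % i + 1 == ('%d' % i) + 1 -> TypeError
    i = 0
    msg = 'No program was given for heuristic %d' % (i + 1)  # parenthesized fix
    assert msg == 'No program was given for heuristic 1'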
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-08-22 20:57 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-08-22 20:57 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-08-22 20:57:26

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Don't set the nodeid attribute for clusternode tags when adding a new node to a RHEL4 GULM cluster

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.20&r2=1.227.2.21

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/08/09 21:28:52	1.227.2.20
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/08/22 20:57:26	1.227.2.21
@@ -544,21 +544,32 @@
 		request.SESSION.set('add_node', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
 
+	gulm_ptr = None
+	next_node_id = 1
 	try:
 		model = getModelBuilder(None, cluster_ricci, cluster_ricci.dom0())
 		if not model:
 			errors.append('Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername)
 			raise Exception, 'unable to get model for %s' % clustername
-
 		nodesptr = model.getClusterNodesPtr()
-		used_ids = {}
+		gulm_ptr = model.getGULMPtr()
+		used_ids = []
+
 		for i in model.getNodes():
-			used_ids[int(i.getAttribute('nodeid'))] = 1
+			if not gulm_ptr:
+				used_ids.append(int(i.getAttribute('nodeid')))
+
 			node_name = str(i.getAttribute('name'))
 			if node_name in system_list:
 				system_list[node_name]['errors'] = True
 				errors.append('%s is already a member of %s' \
 					% (node_name, clustername))
+
+		if not gulm_ptr:
+			used_ids.sort()
+			used_list_len = len(used_ids)
+			if used_list_len > 0:
+				next_node_id = used_ids[used_list_len - 1] + 1
 	except Exception, e:
 		incomplete = True
 		errors.append('Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername)
@@ -569,8 +580,6 @@
 		request.SESSION.set('add_node', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
 
-	next_node_id = 1
-
 	try:
 		for x in system_list:
 			i = system_list[x]
@@ -606,13 +615,13 @@
 						% (cur_host, e, str(e)))
 				continue
 
-			next_node_id += 1
 			new_node = ClusterNode()
 			new_node.attr_hash['name'] = str(i['host'])
 			new_node.attr_hash['votes'] = str(1)
-			while next_node_id in used_ids:
+
+			if not gulm_ptr:
+				new_node.attr_hash['nodeid'] = str(next_node_id)
 				next_node_id += 1
-			new_node.attr_hash['nodeid'] = str(next_node_id)
 			nodesptr.addChild(new_node)
 
 		if incomplete or len(errors) > 0:
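
Two changes ride in this hunk: GULM clusters stop getting nodeid
attributes altogether, and for non-GULM clusters the next id becomes the
highest existing id plus one instead of the first unused value probed by
the old while loop. The allocation, sketched with illustrative ids (the
real code reads them off the model objects):

    used_ids = [1, 2, 5]            # nodeids collected from the model
    used_ids.sort()
    next_node_id = 1
    if len(used_ids) > 0:
        next_node_id = used_ids[len(used_ids) - 1] + 1
    # next_node_id == 6; the gap at 3 is deliberately not reused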



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-05-03 19:51 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-05-03 19:51 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-05-03 19:51:21

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Add "nodename" attribute for fence_manual instances.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.254&r2=1.255

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/04/02 16:35:13	1.254
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/03 19:51:21	1.255
@@ -1878,7 +1878,10 @@
 
 					# Add back the tags under the method block
 					# for the fence instance
-					instance_list.append({'name': fencedev_name })
+					if fence_type == 'fence_manual':
+						instance_list.append({'name': fencedev_name, 'nodename': nodename })
+					else:
+						instance_list.append({'name': fencedev_name })
 		else:
 			# The user created a new fence device.
 			retcode, retmsg = validateNewFenceDevice(fence_form, model)
@@ -1892,7 +1895,10 @@
 			# so the appropriate XML goes into the <method> block inside
 			# <node><fence>. All we need for that is the device name.
 			if not 'sharable' in fence_form:
-				instance_list.append({'name': fencedev_name })
+				if fence_type == 'fence_manual':
+					instance_list.append({'name': fencedev_name, 'nodename': nodename })
+				else:
+					instance_list.append({'name': fencedev_name })
 
 		if fencedev_unknown is True:
 			# Save any instances for this fence device.
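
fence_manual is the one agent here whose instance needs a per-node
attribute: manual fencing is acknowledged for a specific node, so the
instance carries a nodename alongside the device name. The branch the
patch adds, sketched with illustrative values:

    fence_type = 'fence_manual'
    fencedev_name = 'manual-fence'          # illustrative device name
    nodename = 'node1.example.com'          # illustrative node
    if fence_type == 'fence_manual':
        instance = {'name': fencedev_name, 'nodename': nodename}
    else:
        instance = {'name': fencedev_name}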



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-04-02 16:35 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-04-02 16:35 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-04-02 17:35:14

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Package name is lvm2-cluster; the service name is clvmd.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.253&r2=1.254

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/04/02 15:56:18	1.253
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/04/02 16:35:13	1.254
@@ -4836,7 +4836,7 @@
       else:
         dlist.append("lock_gulmd")
       dlist.append("rgmanager")
-      dlist.append("lvm2-cluster")
+      dlist.append("clvmd")
       dlist.append("gfs")
       dlist.append("gfs2")
       states = getDaemonStates(rc, dlist)
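
The one-liner corrects a packaging mix-up: lvm2-cluster is the RPM,
clvmd is the init script it installs, and the daemon-state query wants
init-script names. With the fix applied the list reads (sketching the
non-GULM branch of the surrounding code):

    # Service (init-script) names, not package names:
    dlist = ['ccsd', 'cman', 'fenced', 'rgmanager', 'clvmd', 'gfs', 'gfs2']
    # 'lvm2-cluster' is the package that ships clvmd, not a service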



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-04-02 15:56 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-04-02 15:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-04-02 16:56:18

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Show lvm2-cluster, gfs, and gfs2 service status and boot settings on the node information page.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.252&r2=1.253

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/27 02:03:56	1.252
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/04/02 15:56:18	1.253
@@ -4836,6 +4836,9 @@
       else:
         dlist.append("lock_gulmd")
       dlist.append("rgmanager")
+      dlist.append("lvm2-cluster")
+      dlist.append("gfs")
+      dlist.append("gfs2")
       states = getDaemonStates(rc, dlist)
       infohash['d_states'] = states
   else:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-27  2:03 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-27  2:03 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-03-27 03:03:57

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix a string format bug that could cause an exception to be raised

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.251&r2=1.252

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/16 03:19:39	1.251
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/27 02:03:56	1.252
@@ -696,7 +696,7 @@
 
 		if not success:
 			incomplete = True
-			errors.append('An error occurred while attempting to add cluster node \"%s\"')
+			errors.append('An error occurred while attempting to add cluster node \"%s\"' % cur_host)
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
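
Without the % cur_host, appending the string is not itself an error; the
literal "%s" simply survives into the message shown to the user.
Illustrated:

    cur_host = 'node1.example.com'
    msg = 'An error occurred while attempting to add cluster node "%s"'
    assert '%s' in msg              # placeholder left verbatim, no exception
    msg = msg % cur_host            # the fix: supply the argument
    assert 'node1.example.com' in msg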



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-16  3:19 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-16  3:19 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-03-16 03:19:40

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix a bug in the code that ensures FSIDs for fs and cluster fs resources are unique.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.250&r2=1.251

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/15 22:08:42	1.250
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/16 03:19:39	1.251
@@ -61,8 +61,8 @@
 	obj_list.extend(model.searchObjectTree('clusterfs'))
 	return map(lambda x: x.getAttribute('fsid') and int(x.getAttribute('fsid')) or 0, obj_list)
 
-def fsid_is_unique(fsid):
-	fsid_list = get_fsid_list
+def fsid_is_unique(model, fsid):
+	fsid_list = get_fsid_list(model)
 	return fsid not in fsid_list
 
 def generate_fsid(model, name):
@@ -6152,7 +6152,7 @@
 		if not fsid:
 			raise Exception, 'No filesystem ID was given for this filesystem resource.'
 		fsid_int = int(fsid)
-		if not fsid_is_unique(fsid_int):
+		if not fsid_is_unique(model, fsid_int):
 			raise Exception, 'The filesystem ID provided is not unique.'
 	except Exception, e:
 		fsid = str(generate_fsid(model, name))
@@ -6269,7 +6269,7 @@
 		if not fsid:
 			raise Exception, 'No filesystem ID was given for this cluster filesystem resource.'
 		fsid_int = int(fsid)
-		if not fsid_is_unique(fsid_int):
+		if not fsid_is_unique(model, fsid_int):
 			raise Exception, 'The filesystem ID provided is not unique.'
 	except Exception, e:
 		fsid = str(generate_fsid(model, name))
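
The underlying bug was a missing call: fsid_list = get_fsid_list bound
the function object itself, so the later membership test raised
TypeError ("argument of type 'function' is not iterable"). The fix both
calls the function and threads the model through. Reduced to essentials
(the model argument is stubbed out here):

    def get_fsid_list(model):
        return [10, 20, 30]         # stand-in for the real model walk

    broken = get_fsid_list          # bug: the function object, not a list
    # 10 in broken  ->  TypeError: argument of type 'function' is not iterable

    def fsid_is_unique(model, fsid):
        return fsid not in get_fsid_list(model)    # fixed: call it

    assert fsid_is_unique(None, 40)
    assert not fsid_is_unique(None, 10)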



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-16  3:19 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-16  3:19 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-03-16 03:19:22

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix a bug in the code that ensures FSIDs for fs and cluster fs resources are unique.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.27&r2=1.120.2.28

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/15 22:11:28	1.120.2.27
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/16 03:19:22	1.120.2.28
@@ -60,8 +60,8 @@
 	obj_list.extend(model.searchObjectTree('clusterfs'))
 	return map(lambda x: x.getAttribute('fsid') and int(x.getAttribute('fsid')) or 0, obj_list)
 
-def fsid_is_unique(fsid):
-	fsid_list = get_fsid_list
+def fsid_is_unique(model, fsid):
+	fsid_list = get_fsid_list(model)
 	return fsid not in fsid_list
 
 def generate_fsid(model, name):
@@ -6150,7 +6150,7 @@
 		if not fsid:
 			raise Exception, 'No filesystem ID was given for this filesystem resource.'
 		fsid_int = int(fsid)
-		if not fsid_is_unique(fsid_int):
+		if not fsid_is_unique(model, fsid_int):
 			raise Exception, 'The filesystem ID provided is not unique.'
 	except Exception, e:
 		fsid = str(generate_fsid(model, name))
@@ -6267,7 +6267,7 @@
 		if not fsid:
 			raise Exception, 'No filesystem ID was given for this cluster filesystem resource.'
 		fsid_int = int(fsid)
-		if not fsid_is_unique(fsid_int):
+		if not fsid_is_unique(model, fsid_int):
 			raise Exception, 'The filesystem ID provided is not unique.'
 	except Exception, e:
 		fsid = str(generate_fsid(model, name))



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-16  3:19 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-16  3:19 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-03-16 03:19:01

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix a bug in the code that ensures FSIDs for fs and cluster fs resources are unique.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.17&r2=1.227.2.18

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/15 22:09:58	1.227.2.17
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/16 03:19:00	1.227.2.18
@@ -60,8 +60,8 @@
 	obj_list.extend(model.searchObjectTree('clusterfs'))
 	return map(lambda x: x.getAttribute('fsid') and int(x.getAttribute('fsid')) or 0, obj_list)
 
-def fsid_is_unique(fsid):
-	fsid_list = get_fsid_list
+def fsid_is_unique(model, fsid):
+	fsid_list = get_fsid_list(model)
 	return fsid not in fsid_list
 
 def generate_fsid(model, name):
@@ -6150,7 +6150,7 @@
 		if not fsid:
 			raise Exception, 'No filesystem ID was given for this filesystem resource.'
 		fsid_int = int(fsid)
-		if not fsid_is_unique(fsid_int):
+		if not fsid_is_unique(model, fsid_int):
 			raise Exception, 'The filesystem ID provided is not unique.'
 	except Exception, e:
 		fsid = str(generate_fsid(model, name))
@@ -6267,7 +6267,7 @@
 		if not fsid:
 			raise Exception, 'No filesystem ID was given for this cluster filesystem resource.'
 		fsid_int = int(fsid)
-		if not fsid_is_unique(fsid_int):
+		if not fsid_is_unique(model, fsid_int):
 			raise Exception, 'The filesystem ID provided is not unique.'
 	except Exception, e:
 		fsid = str(generate_fsid(model, name))



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-13  3:07 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-13  3:07 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-03-13 03:07:03

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Add missing format string arg

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.15&r2=1.227.2.16

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 05:46:37	1.227.2.15
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/13 03:07:03	1.227.2.16
@@ -674,7 +674,7 @@
 
 		if not success:
 			incomplete = True
-			errors.append('An error occurred while attempting to add cluster node \"%s\"')
+			errors.append('An error occurred while attempting to add cluster node \"%s\"' % cur_host)
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-13  3:06 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-13  3:06 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-03-13 03:06:25

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Add missing format string arg

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.25&r2=1.120.2.26

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 05:46:07	1.120.2.25
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/13 03:06:24	1.120.2.26
@@ -674,7 +674,7 @@
 
 		if not success:
 			incomplete = True
-			errors.append('An error occurred while attempting to add cluster node \"%s\"')
+			errors.append('An error occurred while attempting to add cluster node \"%s\"' % cur_host)
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-12  5:47 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-12  5:47 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-03-12 05:46:59

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix the navigation portal so the correct item is highlighted when adding and editing virtual services.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.248&r2=1.249

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 04:25:41	1.248
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 05:46:59	1.249
@@ -2686,11 +2686,11 @@
   sv['cfg_type'] = "services"
   sv['absolute_url'] = url + "?pagetype=" + SERVICES + "&clustername=" + cluname
   sv['Description'] = "Service configuration for this cluster"
-  if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE:
+  if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE or pagetype == SERVICE_LIST or pagetype == VM_ADD or pagetype == VM_CONFIG:
     sv['show_children'] = True
   else:
     sv['show_children'] = False
-  if pagetype == SERVICES:
+  if pagetype == SERVICES or pagetype == SERVICE_LIST:
     sv['currentItem'] = True
   else:
     sv['currentItem'] = False
@@ -2721,11 +2721,11 @@
   svcfg['cfg_type'] = "servicecfg"
   svcfg['absolute_url'] = url + "?pagetype=" + SERVICE_CONFIG + "&clustername=" + cluname
   svcfg['Description'] = "Configure a Service for this cluster"
-  if pagetype == SERVICE_CONFIG or pagetype == SERVICE:
+  if pagetype == SERVICE_CONFIG or pagetype == SERVICE or pagetype == VM_CONFIG:
     svcfg['show_children'] = True
   else:
     svcfg['show_children'] = False
-  if pagetype == SERVICE_CONFIG:
+  if pagetype == SERVICE_CONFIG or pagetype == VM_CONFIG:
     svcfg['currentItem'] = True
   else:
     svcfg['currentItem'] = False
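
This same fix appears three times in the thread (trunk, RHEL4, RHEL5),
each copy growing the same chained == / or tests. A tuple-membership
test is an equivalent, shorter form; sketched with illustrative
constants, since Luci defines the real page-type values elsewhere:

    SERVICES, SERVICE_CONFIG, SERVICE_ADD = '21', '22', '23'    # illustrative
    SERVICE, SERVICE_LIST, VM_ADD, VM_CONFIG = '24', '25', '26', '27'

    pagetype = SERVICE_LIST
    sv = {}
    sv['show_children'] = pagetype in (SERVICES, SERVICE_CONFIG, SERVICE_ADD,
                                       SERVICE, SERVICE_LIST, VM_ADD, VM_CONFIG)
    sv['currentItem'] = pagetype in (SERVICES, SERVICE_LIST)
    assert sv['show_children'] and sv['currentItem']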



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-12  5:46 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-12  5:46 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-03-12 05:46:38

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix the navigation portal so the correct item is highlighted when adding and editing virtual services.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.14&r2=1.227.2.15

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 04:24:34	1.227.2.14
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 05:46:37	1.227.2.15
@@ -2684,11 +2684,11 @@
   sv['cfg_type'] = "services"
   sv['absolute_url'] = url + "?pagetype=" + SERVICES + "&clustername=" + cluname
   sv['Description'] = "Service configuration for this cluster"
-  if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE:
+  if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE or pagetype == SERVICE_LIST or pagetype == VM_ADD or pagetype == VM_CONFIG:
     sv['show_children'] = True
   else:
     sv['show_children'] = False
-  if pagetype == SERVICES:
+  if pagetype == SERVICES or pagetype == SERVICE_LIST:
     sv['currentItem'] = True
   else:
     sv['currentItem'] = False
@@ -2719,11 +2719,11 @@
   svcfg['cfg_type'] = "servicecfg"
   svcfg['absolute_url'] = url + "?pagetype=" + SERVICE_CONFIG + "&clustername=" + cluname
   svcfg['Description'] = "Configure a Service for this cluster"
-  if pagetype == SERVICE_CONFIG or pagetype == SERVICE:
+  if pagetype == SERVICE_CONFIG or pagetype == SERVICE or pagetype == VM_CONFIG:
     svcfg['show_children'] = True
   else:
     svcfg['show_children'] = False
-  if pagetype == SERVICE_CONFIG:
+  if pagetype == SERVICE_CONFIG or pagetype == VM_CONFIG:
     svcfg['currentItem'] = True
   else:
     svcfg['currentItem'] = False



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-12  5:46 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-12  5:46 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-03-12 05:46:07

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix the navigation portal so the correct item is highlighted when adding and editing virtual services.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.24&r2=1.120.2.25

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 04:22:26	1.120.2.24
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/12 05:46:07	1.120.2.25
@@ -2684,11 +2684,11 @@
   sv['cfg_type'] = "services"
   sv['absolute_url'] = url + "?pagetype=" + SERVICES + "&clustername=" + cluname
   sv['Description'] = "Service configuration for this cluster"
-  if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE:
+  if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE or pagetype == SERVICE_LIST or pagetype == VM_ADD or pagetype == VM_CONFIG:
     sv['show_children'] = True
   else:
     sv['show_children'] = False
-  if pagetype == SERVICES:
+  if pagetype == SERVICES or pagetype == SERVICE_LIST:
     sv['currentItem'] = True
   else:
     sv['currentItem'] = False
@@ -2719,11 +2719,11 @@
   svcfg['cfg_type'] = "servicecfg"
   svcfg['absolute_url'] = url + "?pagetype=" + SERVICE_CONFIG + "&clustername=" + cluname
   svcfg['Description'] = "Configure a Service for this cluster"
-  if pagetype == SERVICE_CONFIG or pagetype == SERVICE:
+  if pagetype == SERVICE_CONFIG or pagetype == SERVICE or pagetype == VM_CONFIG:
     svcfg['show_children'] = True
   else:
     svcfg['show_children'] = False
-  if pagetype == SERVICE_CONFIG:
+  if pagetype == SERVICE_CONFIG or pagetype == VM_CONFIG:
     svcfg['currentItem'] = True
   else:
     svcfg['currentItem'] = False



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-06 22:48 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-06 22:48 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-03-06 22:48:19

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix an indentation error (not present in other branches)

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.246&r2=1.247

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/05 16:50:43	1.246
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/06 22:48:19	1.247
@@ -3448,7 +3448,7 @@
 					starturl['nodename'] = node.getName()
 					starturl['url'] = baseurl + '?' + 'clustername=' + cluname +'&servicename=' + item['name'] + '&pagetype=' + SERVICE_START + '&nodename=' + node.getName()
 					starturls.append(starturl)
-					itemmap['links'] = starturls
+			itemmap['links'] = starturls
 
 			try:
 				svc = model.retrieveServiceByName(item['name'])
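
The dedent matters because itemmap['links'] used to be assigned inside
the per-node loop: it was redundantly reassigned on every pass and, when
the loop body never ran, not set at all. After the fix it executes
exactly once:

    starturls = []
    itemmap = {}
    for node in []:                     # zero start targets
        starturls.append({'nodename': node})
        # itemmap['links'] = starturls  # old indentation: skipped entirely
    itemmap['links'] = starturls        # new indentation: always executed
    assert itemmap['links'] == []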



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-01 20:22 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-01 20:22 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-03-01 20:22:34

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	- Don't leave behind empty clusters when cluster creation fails
	- Pass manage_delObjects() a list in a few places it was getting a string
	Related: bz 230466

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.21&r2=1.120.2.22

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/01 00:31:08	1.120.2.21
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/01 20:22:33	1.120.2.22
@@ -3293,17 +3293,26 @@
 			% (clustername, cluster_path, str(e)))
 		return results
 
-	for node in nodelist:
+	if len(nodelist) < 1:
+		luci_log.debug_verbose('GCSDB0a: removing cluster %s because it has no nodes' % clustername)
 		try:
-			node_val = {}
-			node_val['type'] = 'node'
-			node_val['name'] = node[0]
-			node_val['clustered'] = '[unknown]'
-			node_val['online'] = '[unknown]'
-			node_val['error'] = True
-			results.append(node_val)
+			clusters_dir = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+			clusters_dir.manage_delObjects([clustername])
 		except Exception, e:
-			luci_log.debug_verbose('GCSDB1: %s' % str(e))
+			luci_log.debug_verbose('GCSDB0b: %s: %s' % (clustername, str(e)))
+	else:
+		for node in nodelist:
+			try:
+				node_val = {}
+				node_val['type'] = 'node'
+				node_val['name'] = node[0]
+				node_val['clustered'] = '[unknown]'
+				node_val['online'] = '[unknown]'
+				node_val['error'] = True
+				results.append(node_val)
+			except Exception, e:
+				luci_log.debug_verbose('GCSDB1: %s' % str(e))
+
 	return results
 
 def getClusterStatus(self, request, rc, cluname=None):
@@ -4184,7 +4193,7 @@
 			errors += 1
 			continue
 		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('CStart1: nodeLeave %s' % nodename_resolved)
+			luci_log.debug_verbose('CStart1: nodeJoin %s' % nodename_resolved)
 			errors += 1
 
 	return errors
@@ -5444,12 +5453,12 @@
           redirect_message = True
 
         luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
-        clusterfolder.manage_delObjects(item[0])
+        clusterfolder.manage_delObjects([item[0]])
         continue
 
-
-
+      del_db_obj = False
       if creation_status < 0:  #an error was encountered
+        luci_log.debug_verbose('ICB13a: %s: CS %d for %s' % (cluname, creation_status, ricci[0]))
         if creation_status == RICCI_CONNECT_FAILURE:
           laststatus = item[1].getProperty(LAST_STATUS)
           if laststatus == INSTALL_TASK: #This means maybe node is rebooting
@@ -5479,14 +5488,17 @@
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, INSTALL_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[INSTALL_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(DISABLE_SVC_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, DISABLE_SVC_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[DISABLE_SVC_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(REBOOT_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, REBOOT_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[REBOOT_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(SEND_CONF):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, SEND_CONF)
@@ -5500,11 +5512,15 @@
           (err_code, err_msg) = extract_module_status(batch_xml, START_NODE)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[START_NODE]
         else:
+          del_db_obj = True
           node_report['iserror'] = True
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[0]
 
         try:
-          clusterfolder.manage_delObjects(item[0])
+          if del_db_obj is True:
+            luci_log.debug_verbose('ICB13a: %s node creation failed for %s: %d: deleting DB entry' % (cluname, ricci[0], creation_status))
+            clusterfolder.manage_delObjects([ricci[0]])
+          clusterfolder.manage_delObjects([item[0]])
         except Exception, e:
           luci_log.debug_verbose('ICB14: delObjects: %s: %s' \
             % (item[0], str(e)))
@@ -5518,7 +5534,7 @@
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
           try:
-              clusterfolder.manage_delObjects(item[0])
+              clusterfolder.manage_delObjects([item[0]])
           except Exception, e:
               luci_log.info('ICB15: Unable to delete %s: %s' % (item[0], str(e)))
           continue
@@ -5569,7 +5585,7 @@
           node_report['desc'] = flag_msg + flag_desc + REDIRECT_MSG
         nodereports.append(node_report)
         try:
-            clusterfolder.manage_delObjects(item[0])
+            clusterfolder.manage_delObjects([item[0]])
         except Exception, e:
             luci_log.info('ICB16: Unable to delete %s: %s' % (item[0], str(e)))
       else:
@@ -7352,7 +7368,7 @@
 			if finished == -1:
 				luci_log.debug_verbose('NNFP2: batch error: %s' % batch_ret[1])
 			try:
-				nodefolder.manage_delObjects(item[0])
+				nodefolder.manage_delObjects([item[0]])
 			except Exception, e:
 				luci_log.info('NNFP3: manage_delObjects for %s failed: %s' \
 					% (item[0], str(e)))
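
Passing a bare string where a list of ids is expected fails in the usual
Python way: code that iterates its argument treats the string as a
sequence of one-character ids. A generic illustration (not Zope's actual
manage_delObjects implementation):

    def delete_objects(ids):
        deleted = []
        for obj_id in ids:          # iterating a str yields characters
            deleted.append(obj_id)
        return deleted

    assert delete_objects('node1') == ['n', 'o', 'd', 'e', '1']
    assert delete_objects(['node1']) == ['node1']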



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-01 20:22 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-01 20:22 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-03-01 20:22:31

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	- Don't leave behind empty clusters when cluster creation fails
	- Pass manage_delObjects() a list in a few places it was getting a string
	Related: bz 230466

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.11&r2=1.227.2.12

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/22 20:52:07	1.227.2.11
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/01 20:22:31	1.227.2.12
@@ -3293,17 +3293,26 @@
 			% (clustername, cluster_path, str(e)))
 		return results
 
-	for node in nodelist:
+	if len(nodelist) < 1:
+		luci_log.debug_verbose('GCSDB0a: removing cluster %s because it has no nodes' % clustername)
 		try:
-			node_val = {}
-			node_val['type'] = 'node'
-			node_val['name'] = node[0]
-			node_val['clustered'] = '[unknown]'
-			node_val['online'] = '[unknown]'
-			node_val['error'] = True
-			results.append(node_val)
+			clusters_dir = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+			clusters_dir.manage_delObjects([clustername])
 		except Exception, e:
-			luci_log.debug_verbose('GCSDB1: %s' % str(e))
+			luci_log.debug_verbose('GCSDB0b: %s: %s' % (clustername, str(e)))
+	else:
+		for node in nodelist:
+			try:
+				node_val = {}
+				node_val['type'] = 'node'
+				node_val['name'] = node[0]
+				node_val['clustered'] = '[unknown]'
+				node_val['online'] = '[unknown]'
+				node_val['error'] = True
+				results.append(node_val)
+			except Exception, e:
+				luci_log.debug_verbose('GCSDB1: %s' % str(e))
+
 	return results
 
 def getClusterStatus(self, request, rc, cluname=None):
@@ -4184,7 +4193,7 @@
 			errors += 1
 			continue
 		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('CStart1: nodeLeave %s' % nodename_resolved)
+			luci_log.debug_verbose('CStart1: nodeJoin %s' % nodename_resolved)
 			errors += 1
 
 	return errors
@@ -5444,12 +5453,12 @@
           redirect_message = True
 
         luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
-        clusterfolder.manage_delObjects(item[0])
+        clusterfolder.manage_delObjects([item[0]])
         continue
 
-
-
+      del_db_obj = False
       if creation_status < 0:  #an error was encountered
+        luci_log.debug_verbose('ICB13a: %s: CS %d for %s' % (cluname, creation_status, ricci[0]))
         if creation_status == RICCI_CONNECT_FAILURE:
           laststatus = item[1].getProperty(LAST_STATUS)
           if laststatus == INSTALL_TASK: #This means maybe node is rebooting
@@ -5479,14 +5488,17 @@
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, INSTALL_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[INSTALL_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(DISABLE_SVC_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, DISABLE_SVC_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[DISABLE_SVC_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(REBOOT_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, REBOOT_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[REBOOT_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(SEND_CONF):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, SEND_CONF)
@@ -5500,11 +5512,15 @@
           (err_code, err_msg) = extract_module_status(batch_xml, START_NODE)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[START_NODE]
         else:
+          del_db_obj = True
           node_report['iserror'] = True
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[0]
 
         try:
-          clusterfolder.manage_delObjects(item[0])
+          if del_db_obj is True:
+            luci_log.debug_verbose('ICB13a: %s node creation failed for %s: %d: deleting DB entry' % (cluname, ricci[0], creation_status))
+            clusterfolder.manage_delObjects([ricci[0]])
+          clusterfolder.manage_delObjects([item[0]])
         except Exception, e:
           luci_log.debug_verbose('ICB14: delObjects: %s: %s' \
             % (item[0], str(e)))
@@ -5518,7 +5534,7 @@
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
           try:
-              clusterfolder.manage_delObjects(item[0])
+              clusterfolder.manage_delObjects([item[0]])
           except Exception, e:
               luci_log.info('ICB15: Unable to delete %s: %s' % (item[0], str(e)))
           continue
@@ -5569,7 +5585,7 @@
           node_report['desc'] = flag_msg + flag_desc + REDIRECT_MSG
         nodereports.append(node_report)
         try:
-            clusterfolder.manage_delObjects(item[0])
+            clusterfolder.manage_delObjects([item[0]])
         except Exception, e:
             luci_log.info('ICB16: Unable to delete %s: %s' % (item[0], str(e)))
       else:
@@ -7352,7 +7368,7 @@
 			if finished == -1:
 				luci_log.debug_verbose('NNFP2: batch error: %s' % batch_ret[1])
 			try:
-				nodefolder.manage_delObjects(item[0])
+				nodefolder.manage_delObjects([item[0]])
 			except Exception, e:
 				luci_log.info('NNFP3: manage_delObjects for %s failed: %s' \
 					% (item[0], str(e)))
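
The GCSDB hunk encodes a simple cleanup policy: a cluster database entry with no node entries under it is treated as debris from a failed creation and removed, otherwise each node is listed in an unknown state. A condensed, runnable sketch of that control flow, with a stub standing in for the Zope clusters folder:

    def cluster_status_rows(clusters_dir, clustername, nodelist):
        # Mirrors the patched GCSDB path: no nodes means the cluster
        # entry is leftover from a failed create, so delete it.
        results = []
        if len(nodelist) < 1:
            try:
                clusters_dir.manage_delObjects([clustername])
            except Exception, e:
                print 'GCSDB0b: %s: %s' % (clustername, str(e))
        else:
            for node in nodelist:
                results.append({'type': 'node', 'name': node[0],
                                'clustered': '[unknown]',
                                'online': '[unknown]', 'error': True})
        return results

    class StubDir:  # stand-in for the Zope clusters folder
        def manage_delObjects(self, ids):
            print 'deleted %s' % ids

    print cluster_status_rows(StubDir(), 'clu1', [])
    print cluster_status_rows(StubDir(), 'clu1', [('node1',)])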



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-03-01 20:22 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-03-01 20:22 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-03-01 20:22:29

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	- Don't leave behind empty clusters when cluster creation fails
	- Pass manage_delObjects() a list in a few places where it was getting a string
	Related: bz 230466

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.244&r2=1.245

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/23 22:07:45	1.244
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/03/01 20:22:29	1.245
@@ -3295,17 +3295,26 @@
 			% (clustername, cluster_path, str(e)))
 		return results
 
-	for node in nodelist:
+	if len(nodelist) < 1:
+		luci_log.debug_verbose('GCSDB0a: removing cluster %s because it has no nodes' % clustername)
 		try:
-			node_val = {}
-			node_val['type'] = 'node'
-			node_val['name'] = node[0]
-			node_val['clustered'] = '[unknown]'
-			node_val['online'] = '[unknown]'
-			node_val['error'] = True
-			results.append(node_val)
+			clusters_dir = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+			clusters_dir.manage_delObjects([clustername])
 		except Exception, e:
-			luci_log.debug_verbose('GCSDB1: %s' % str(e))
+			luci_log.debug_verbose('GCSDB0b: %s: %s' % (clustername, str(e)))
+	else:
+		for node in nodelist:
+			try:
+				node_val = {}
+				node_val['type'] = 'node'
+				node_val['name'] = node[0]
+				node_val['clustered'] = '[unknown]'
+				node_val['online'] = '[unknown]'
+				node_val['error'] = True
+				results.append(node_val)
+			except Exception, e:
+				luci_log.debug_verbose('GCSDB1: %s' % str(e))
+
 	return results
 
 def getClusterStatus(self, request, rc, cluname=None):
@@ -4186,7 +4195,7 @@
 			errors += 1
 			continue
 		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('CStart1: nodeLeave %s' % nodename_resolved)
+			luci_log.debug_verbose('CStart1: nodeJoin %s' % nodename_resolved)
 			errors += 1
 
 	return errors
@@ -5446,12 +5455,12 @@
           redirect_message = True
 
         luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
-        clusterfolder.manage_delObjects(item[0])
+        clusterfolder.manage_delObjects([item[0]])
         continue
 
-
-
+      del_db_obj = False
       if creation_status < 0:  #an error was encountered
+        luci_log.debug_verbose('ICB13a: %s: CS %d for %s' % (cluname, creation_status, ricci[0]))
         if creation_status == RICCI_CONNECT_FAILURE:
           laststatus = item[1].getProperty(LAST_STATUS)
           if laststatus == INSTALL_TASK: #This means maybe node is rebooting
@@ -5481,14 +5490,17 @@
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, INSTALL_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[INSTALL_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(DISABLE_SVC_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, DISABLE_SVC_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[DISABLE_SVC_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(REBOOT_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, REBOOT_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[REBOOT_TASK] + err_msg
+          del_db_obj = True
         elif creation_status == -(SEND_CONF):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, SEND_CONF)
@@ -5502,11 +5514,15 @@
           (err_code, err_msg) = extract_module_status(batch_xml, START_NODE)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[START_NODE]
         else:
+          del_db_obj = True
           node_report['iserror'] = True
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[0]
 
         try:
-          clusterfolder.manage_delObjects(item[0])
+          if del_db_obj is True:
+            luci_log.debug_verbose('ICB13a: %s node creation failed for %s: %d: deleting DB entry' % (cluname, ricci[0], creation_status))
+            clusterfolder.manage_delObjects([ricci[0]])
+          clusterfolder.manage_delObjects([item[0]])
         except Exception, e:
           luci_log.debug_verbose('ICB14: delObjects: %s: %s' \
             % (item[0], str(e)))
@@ -5520,7 +5536,7 @@
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
           try:
-              clusterfolder.manage_delObjects(item[0])
+              clusterfolder.manage_delObjects([item[0]])
           except Exception, e:
               luci_log.info('ICB15: Unable to delete %s: %s' % (item[0], str(e)))
           continue
@@ -5571,7 +5587,7 @@
           node_report['desc'] = flag_msg + flag_desc + REDIRECT_MSG
         nodereports.append(node_report)
         try:
-            clusterfolder.manage_delObjects(item[0])
+            clusterfolder.manage_delObjects([item[0]])
         except Exception, e:
             luci_log.info('ICB16: Unable to delete %s: %s' % (item[0], str(e)))
       else:
@@ -7354,7 +7370,7 @@
 			if finished == -1:
 				luci_log.debug_verbose('NNFP2: batch error: %s' % batch_ret[1])
 			try:
-				nodefolder.manage_delObjects(item[0])
+				nodefolder.manage_delObjects([item[0]])
 			except Exception, e:
 				luci_log.info('NNFP3: manage_delObjects for %s failed: %s' \
 					% (item[0], str(e)))



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-02-13 19:50 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-02-13 19:50 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-02-13 19:50:58

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix for service info display when a gulm cluster is unresponsive.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.237&r2=1.238

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/12 20:24:28	1.237
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/13 19:50:58	1.238
@@ -4569,7 +4569,9 @@
   infohash['currentservices'] = svc_dict_list
 
   fdom_dict_list = list()
+  gulm_cluster = False
   if model:
+    gulm_cluster = model.getGULMPtr() is not None
     try:
       infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
     except:
@@ -4607,7 +4609,7 @@
     if rc is not None:
       dlist = list()
       dlist.append("ccsd")
-      if model.getGULMPtr() is None:
+      if not gulm_cluster:
         dlist.append("cman")
         dlist.append("fenced")
       else:
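
Hoisting the getGULMPtr() call is the actual fix: the daemon-list code further down runs even when no model could be built, which is exactly the unresponsive-cluster case in the log, and the old inline model.getGULMPtr() test would raise AttributeError on a None model. A reduced sketch (Model is a stand-in for luci's cluster model; the lock_gulmd branch follows the 2007-01-31 commit later on this page):

    class Model:  # hypothetical stand-in for luci's cluster model
        def getGULMPtr(self):
            return object()   # pretend a GULM section exists

    def daemon_list(model):
        gulm_cluster = False  # safe default when the cluster is unresponsive
        if model:
            gulm_cluster = model.getGULMPtr() is not None
        dlist = ['ccsd']
        if not gulm_cluster:
            dlist.append('cman')
            dlist.append('fenced')
        else:
            dlist.append('lock_gulmd')
        dlist.append('rgmanager')
        return dlist

    print daemon_list(None)     # ['ccsd', 'cman', 'fenced', 'rgmanager']
    print daemon_list(Model())  # ['ccsd', 'lock_gulmd', 'rgmanager']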



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-02-12 20:25 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-02-12 20:25 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-02-12 20:25:42

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 
	                           ricci_communicator.py 

Log message:
	- A handful of fixes for bugs that show up when unlikely exceptions are raised
	- Fix for a bug that could cause luci not to attempt authentication to ricci agents

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.5&r2=1.227.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.48.2.1&r2=1.48.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.24&r2=1.24.2.1

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/08 15:59:20	1.227.2.5
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/12 20:25:42	1.227.2.6
@@ -323,7 +323,9 @@
 		try:
 			resultNode = rc.process_batch(batchNode, async=True)
 			batch_id_map[i] = resultNode.getAttribute('batch_id')
-		except:
+		except Exception, e:
+			luci_log.debug_verbose('validateCreateCluster0: %s: %s' \
+				% (i, str(e)))
 			errors.append('An error occurred while attempting to add cluster node \"%s\"' % i)
 			if len(batch_id_map) == 0:
 				request.SESSION.set('create_cluster', add_cluster)
@@ -448,6 +450,7 @@
 
 			prev_auth = rc.authed()
 			cur_system['prev_auth'] = prev_auth
+
 			try:
 				if prev_auth:
 					messages.append('Host %s is already authenticated.' \
@@ -457,7 +460,7 @@
 
 				if not rc.authed():
 					raise Exception, 'authentication failed'
-			except:
+			except Exception, e:
 				cur_system['errors'] = True
 				incomplete = True
 				errors.append('Error authenticating to %s: %s' \
@@ -617,7 +620,7 @@
 				break
 			if code == -1:
 				errors.append(batch_ret[1])
-				raise Exception, batch_ret[1]
+				raise Exception, str(batch_ret[1])
 			if code == False:
 				time.sleep(0.5)
 	except Exception, e:
@@ -4137,13 +4140,13 @@
 
 def clusterDelete(self, model):
 	num_errors = clusterStop(self, model, delete=True)
-	if num_errors < 1:
-		try:
-			clustername = model.getClusterName()
-		except Exception, e:
-			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
-			return None
+	try:
+		clustername = model.getClusterName()
+	except Exception, e:
+		luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+		return None
 
+	if num_errors < 1:
 		try:
 			delCluster(self, clustername)
 		except Exception, e:
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2007/02/09 18:32:04	1.48.2.1
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2007/02/12 20:25:42	1.48.2.2
@@ -156,7 +156,7 @@
 		if len(sysData) < 2 or not sysData[1]:
 			raise Exception, 'no password'
 		cur_pass = sysData[1]
-		cur_entry['passwd'] = ''
+		cur_entry['passwd'] = cur_pass
 	except:
 		luci_log.debug_verbose('vACI1: %s no password given')
 		request.SESSION.set('add_cluster_initial', cur_entry)
@@ -338,7 +338,7 @@
 	for i in node_list:
 		cur_node = { 'host': i }
 		if same_node_passwds:
-			cur_node['passwd'] = ''
+			cur_node['passwd'] = cur_pass
 		add_cluster['nodes'][i] = cur_node
 	request.SESSION.set('add_cluster', add_cluster)
 	request.response.redirect('/luci/homebase/index_html?pagetype=%s' % HOMEBASE_ADD_CLUSTER)
@@ -382,7 +382,7 @@
 			cur_passwd = None
 		else:
 			cur_passwd = sysData[1]
-			cur_system['passwd'] = ''
+			cur_system['passwd'] = cur_passwd
 
 		try:
 			cur_fp = request.form['__SYSTEM%dFingerprint' % i].strip()
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2007/01/04 00:19:49	1.24
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2007/02/12 20:25:42	1.24.2.1
@@ -165,7 +165,7 @@
             pass
 
         if not self.authed():
-            raise RicciError, 'not authenticated to host %s', self.__hostname
+            raise RicciError, 'not authenticated to host %s' % self.__hostname
         
         # construct request
         doc = minidom.Document()
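
The ricci_communicator.py hunk fixes a subtle Python 2 raise form: with a comma, the hostname becomes the third argument to raise, which must be a traceback object, so the original line raised TypeError and the %s was never interpolated. Only the % operator formats the message. A standalone demonstration:

    class RicciError(Exception):
        pass

    hostname = 'node1.example.com'  # illustrative host

    try:
        # Broken form: the third raise argument must be a traceback or None.
        raise RicciError, 'not authenticated to host %s', hostname
    except TypeError, e:
        print 'buggy form: %s' % str(e)

    try:
        raise RicciError, 'not authenticated to host %s' % hostname
    except RicciError, e:
        print 'fixed form: %s' % str(e)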



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-02-12 20:24 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-02-12 20:24 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-02-12 20:24:28

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 
	                           ricci_communicator.py 

Log message:
	- A handful of fixes for bugs that show up when unlikely exceptions are raised
	- Fix for a bug that could cause luci not to attempt authentication to ricci agents

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.236&r2=1.237
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.49&r2=1.50
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.24&r2=1.25

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/08 16:00:36	1.236
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/12 20:24:28	1.237
@@ -323,7 +323,9 @@
 		try:
 			resultNode = rc.process_batch(batchNode, async=True)
 			batch_id_map[i] = resultNode.getAttribute('batch_id')
-		except:
+		except Exception, e:
+			luci_log.debug_verbose('validateCreateCluster0: %s: %s' \
+				% (i, str(e)))
 			errors.append('An error occurred while attempting to add cluster node \"%s\"' % i)
 			if len(batch_id_map) == 0:
 				request.SESSION.set('create_cluster', add_cluster)
@@ -448,6 +450,7 @@
 
 			prev_auth = rc.authed()
 			cur_system['prev_auth'] = prev_auth
+
 			try:
 				if prev_auth:
 					messages.append('Host %s is already authenticated.' \
@@ -457,7 +460,7 @@
 
 				if not rc.authed():
 					raise Exception, 'authentication failed'
-			except:
+			except Exception, e:
 				cur_system['errors'] = True
 				incomplete = True
 				errors.append('Error authenticating to %s: %s' \
@@ -617,7 +620,7 @@
 				break
 			if code == -1:
 				errors.append(batch_ret[1])
-				raise Exception, batch_ret[1]
+				raise Exception, str(batch_ret[1])
 			if code == False:
 				time.sleep(0.5)
 	except Exception, e:
@@ -4137,13 +4140,13 @@
 
 def clusterDelete(self, model):
 	num_errors = clusterStop(self, model, delete=True)
-	if num_errors < 1:
-		try:
-			clustername = model.getClusterName()
-		except Exception, e:
-			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
-			return None
+	try:
+		clustername = model.getClusterName()
+	except Exception, e:
+		luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+		return None
 
+	if num_errors < 1:
 		try:
 			delCluster(self, clustername)
 		except Exception, e:
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2007/02/09 18:30:44	1.49
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2007/02/12 20:24:28	1.50
@@ -156,7 +156,7 @@
 		if len(sysData) < 2 or not sysData[1]:
 			raise Exception, 'no password'
 		cur_pass = sysData[1]
-		cur_entry['passwd'] = ''
+		cur_entry['passwd'] = cur_pass
 	except:
 		luci_log.debug_verbose('vACI1: %s no password given')
 		request.SESSION.set('add_cluster_initial', cur_entry)
@@ -338,7 +338,7 @@
 	for i in node_list:
 		cur_node = { 'host': i }
 		if same_node_passwds:
-			cur_node['passwd'] = ''
+			cur_node['passwd'] = cur_pass
 		add_cluster['nodes'][i] = cur_node
 	request.SESSION.set('add_cluster', add_cluster)
 	request.response.redirect('/luci/homebase/index_html?pagetype=%s' % HOMEBASE_ADD_CLUSTER)
@@ -382,7 +382,7 @@
 			cur_passwd = None
 		else:
 			cur_passwd = sysData[1]
-			cur_system['passwd'] = ''
+			cur_system['passwd'] = cur_passwd
 
 		try:
 			cur_fp = request.form['__SYSTEM%dFingerprint' % i].strip()
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2007/01/04 00:19:49	1.24
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2007/02/12 20:24:28	1.25
@@ -165,7 +165,7 @@
             pass
 
         if not self.authed():
-            raise RicciError, 'not authenticated to host %s', self.__hostname
+            raise RicciError, 'not authenticated to host %s' % self.__hostname
         
         # construct request
         doc = minidom.Document()
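
The homebase_adapters.py hunks are the authentication fix named in the log: the form parser stored an empty string where the just-submitted password belonged, so when luci later tried to authenticate to the ricci agent there was no password left to send. A toy round trip, with plain dicts standing in for luci's session plumbing:

    def parse_host_form(form):
        entry = {'host': form['host']}
        # The fix: keep the submitted password for the later attempt
        # (previously this line stored '').
        entry['passwd'] = form['passwd']
        return entry

    def authenticate(entry):
        if not entry.get('passwd'):
            raise Exception, 'no password available for %s' % entry['host']
        return 'authenticated to %s' % entry['host']

    entry = parse_host_form({'host': 'node1', 'passwd': 'secret'})
    print authenticate(entry)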



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-02-07 22:00 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-02-07 22:00 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-02-07 22:00:50

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix for bz 227723

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.230&r2=1.231

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/07 16:55:15	1.230
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/07 22:00:50	1.231
@@ -162,7 +162,7 @@
 
 				if not rc.authed():
 					raise Exception, 'authentication failed'
-			except:
+			except Exception, e:
 				cur_system['errors'] = True
 				incomplete = True
 				errors.append('Error authenticating to %s: %s' \
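
This one-liner, like its RHEL4 twin below ('Fix use of undefined variable in an error path'), is a classic Python 2 pitfall: the handler formats str(e), but a bare except never binds e, so the error path itself dies with a NameError and masks the original failure. A minimal reproduction:

    def authenticate(host):
        raise Exception, 'authentication failed'

    try:
        try:
            authenticate('node1')
        except:                      # bare except: 'e' is never bound
            print 'Error authenticating: %s' % str(e)
    except NameError, ne:
        print 'the handler itself crashed: %s' % str(ne)

    try:
        authenticate('node1')
    except Exception, e:             # the fix binds the exception
        print 'Error authenticating to %s: %s' % ('node1', str(e))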



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-02-07 21:30 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-02-07 21:30 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-02-07 21:30:34

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix use of undefined variable in an error path

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227.2.2&r2=1.227.2.3

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/07 17:02:18	1.227.2.2
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/07 21:30:33	1.227.2.3
@@ -162,7 +162,7 @@
 
 				if not rc.authed():
 					raise Exception, 'authentication failed'
-			except:
+			except Exception, e:
 				cur_system['errors'] = True
 				incomplete = True
 				errors.append('Error authenticating to %s: %s' \



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-02-05 19:56 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-02-05 19:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-02-05 19:56:18

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Get rid of fenced property info in one more place for GULM clusters.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.228&r2=1.229

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/05 19:52:44	1.228
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/02/05 19:56:18	1.229
@@ -3512,23 +3512,24 @@
   #-------------
   #new cluster params - if rhel5
   #-------------
-  #Fence Daemon Props
-  fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB
-  clumap['fencedaemon_url'] = fencedaemon_url
-  fdp = model.getFenceDaemonPtr()
-  pjd = fdp.getAttribute('post_join_delay')
-  if pjd is None:
-    pjd = "6"
-  pfd = fdp.getAttribute('post_fail_delay')
-  if pfd is None:
-    pfd = "0"
-  #post join delay
-  clumap['pjd'] = pjd
-  #post fail delay
-  clumap['pfd'] = pfd
 
   gulm_ptr = model.getGULMPtr()
   if not gulm_ptr:
+    #Fence Daemon Props
+    fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB
+    clumap['fencedaemon_url'] = fencedaemon_url
+    fdp = model.getFenceDaemonPtr()
+    pjd = fdp.getAttribute('post_join_delay')
+    if pjd is None:
+      pjd = "6"
+    pfd = fdp.getAttribute('post_fail_delay')
+    if pfd is None:
+      pfd = "0"
+    #post join delay
+    clumap['pjd'] = pjd
+    #post fail delay
+    clumap['pfd'] = pfd
+
     #-------------
     #if multicast
     multicast_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_MCAST_TAB
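
Moving the fence-daemon block under the gulm_ptr test matches the commit's point: GULM clusters do not run fenced, so post_join_delay and post_fail_delay have no place on their properties page. The guarded read with its defaults, reduced to a function over a plain attribute dict (a stand-in for the model's fence_daemon node):

    def fence_daemon_props(fdp_attrs, gulm_cluster):
        clumap = {}
        if gulm_cluster:
            return clumap            # no fenced properties for GULM
        pjd = fdp_attrs.get('post_join_delay')
        if pjd is None:
            pjd = '6'                # default post-join delay
        pfd = fdp_attrs.get('post_fail_delay')
        if pfd is None:
            pfd = '0'                # default post-fail delay
        clumap['pjd'] = pjd
        clumap['pfd'] = pfd
        return clumap

    print fence_daemon_props({'post_join_delay': '10'}, False)
    print fence_daemon_props({}, True)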



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-31 23:45 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-31 23:45 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-31 23:45:09

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	show lock_gulmd config properties in the daemon info area for nodes in a GULM cluster

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.222&r2=1.223

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/31 23:36:26	1.222
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/31 23:45:09	1.223
@@ -4252,7 +4252,10 @@
     if rc is not None:
       dlist = list()
       dlist.append("ccsd")
-      dlist.append("cman")
+      if model.getGULMPtr() is None:
+        dlist.append("cman")
+      else:
+        dlist.append("lock_gulmd")
       dlist.append("fenced")
       dlist.append("rgmanager")
       states = getDaemonStates(rc, dlist)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-31 19:28 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-31 19:28 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-31 19:28:08

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	More GULM deploy tweaks

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.220&r2=1.221
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.55&r2=1.56

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/31 18:50:29	1.220
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/31 19:28:08	1.221
@@ -557,7 +557,8 @@
 								True,
 								shared_storage,
 								False,
-								download_pkgs)
+								download_pkgs,
+								model.GULM_ptr is not None)
 				if not batch_node:
 					raise Exception, 'batch is blank'
 				system_list[x]['batch'] = batch_node
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2007/01/31 05:26:45	1.55
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2007/01/31 19:28:08	1.56
@@ -47,7 +47,8 @@
 			install_services,
 			install_shared_storage,
 			install_LVS,
-			upgrade_rpms):
+			upgrade_rpms,
+			gulm):
 	
 	batch = '<?xml version="1.0" ?>'
 	batch += '<batch>'
@@ -63,7 +64,10 @@
 	batch += '"/>'
 	batch += '<var name="sets" type="list_xml">'
 	if install_base or install_services or install_shared_storage:
-		batch += '<set name="Cluster Base"/>'
+		if gulm:
+			batch += '<set name="Cluster Base - Gulm"/>'
+		else:
+			batch += '<set name="Cluster Base"/>'
 	if install_services:
 		batch += '<set name="Cluster Service Manager"/>'
 	if install_shared_storage:
@@ -80,7 +84,10 @@
 	batch += '<function_call name="disable">'
 	batch += '<var mutable="false" name="services" type="list_xml">'
 	if install_base or install_services or install_shared_storage:
-		batch += '<set name="Cluster Base"/>'
+		if gulm:
+			batch += '<set name="Cluster Base - Gulm"/>'
+		else:
+			batch += '<set name="Cluster Base"/>'
 	if install_services:
 		batch += '<set name="Cluster Service Manager"/>'
 	if install_shared_storage:
@@ -170,7 +177,10 @@
 	batch += '"/>'
 	batch += '<var name="sets" type="list_xml">'
 	if install_base or install_services or install_shared_storage:
-		batch += '<set name="Cluster Base"/>'
+		if gulm_lockservers:
+			batch += '<set name="Cluster Base - Gulm"/>'
+		else:
+			batch += '<set name="Cluster Base"/>'
 	if install_services:
 		batch += '<set name="Cluster Service Manager"/>'
 	if install_shared_storage:
@@ -187,7 +197,10 @@
 	batch += '<function_call name="disable">'
 	batch += '<var mutable="false" name="services" type="list_xml">'
 	if install_base or install_services or install_shared_storage:
-		batch += '<set name="Cluster Base"/>'
+		if gulm_lockservers:
+			batch += '<set name="Cluster Base - Gulm"/>'
+		else:
+			batch += '<set name="Cluster Base"/>'
 	if install_services:
 		batch += '<set name="Cluster Service Manager"/>'
 	if install_shared_storage:
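
All four near-identical hunks make the same substitution: GULM deployments get the 'Cluster Base - Gulm' package set instead of 'Cluster Base'. Factored into a helper, the choice is one line; the names below are illustrative, not ricci_bridge's real API:

    def base_set_xml(gulm):
        # GULM deployments install a different base package set.
        if gulm:
            return '<set name="Cluster Base - Gulm"/>'
        return '<set name="Cluster Base"/>'

    def sets_xml(install_base, install_services, install_shared_storage, gulm):
        batch = '<var name="sets" type="list_xml">'
        if install_base or install_services or install_shared_storage:
            batch += base_set_xml(gulm)
        if install_services:
            batch += '<set name="Cluster Service Manager"/>'
        # ... further sets (shared storage, etc.) elided ...
        batch += '</var>'
        return batch

    print sets_xml(True, True, False, True)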



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-31 18:50 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-31 18:50 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-31 18:50:29

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	- redirect to the cluster list page after deleting a cluster
	- catch a couple of exceptions that could be hit when deleting a non-GULM cluster

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.219&r2=1.220

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/31 05:26:45	1.219
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/31 18:50:29	1.220
@@ -3323,6 +3323,7 @@
 	if not model:
 		return 'Unable to get the model object for %s' % cluname
 
+	redirect_page = NODES
 	if task == CLUSTER_STOP:
 		clusterStop(self, model)
 	elif task == CLUSTER_START:
@@ -3330,13 +3331,15 @@
 	elif task == CLUSTER_RESTART:
 		clusterRestart(self, model)
 	elif task == CLUSTER_DELETE:
-		clusterStop(self, model, delete=True)
+		ret = clusterDelete(self, model)
+		if ret is not None:
+			redirect_page = ret
 	else:
 		return 'An unknown cluster task was requested.'
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], NODES, model.getClusterName()))
+		% (request['URL'], redirect_page, model.getClusterName()))
 
 def getClusterInfo(self, model, req):
   try:
@@ -3706,6 +3709,7 @@
 		except Exception, e:
 			luci_log.debug_verbose('clusterDelete2: %s %s' \
 				% (clustername, str(e)))
+		return CLUSTERLIST
 	else:
 		luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
 			% (clustername, num_errors))
@@ -4116,7 +4120,10 @@
 
   fdom_dict_list = list()
   if model:
-    infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
+    try:
+      infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
+    except:
+      infohash['gulm_lockserver'] = False
     #next is faildoms
     fdoms = model.getFailoverDomainsForNode(nodename)
     for fdom in fdoms:
@@ -4197,7 +4204,10 @@
     map = {}
     name = item['name']
     map['nodename'] = name
-    map['gulm_lockserver'] = model.isNodeLockserver(name)
+    try:
+      map['gulm_lockserver'] = model.isNodeLockserver(name)
+    except:
+      map['gulm_lockserver'] = False
 
     try:
       baseurl = req['URL']
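
Two defensive patterns here: the task dispatcher now carries a redirect_page default that clusterDelete() can override with the cluster list, and isNodeLockserver() is wrapped so that a model without GULM support yields False rather than taking the whole page down. The latter as a helper:

    def gulm_lockserver_flag(model, nodename):
        # Optional GULM query: any failure just means 'not a lockserver'.
        try:
            return model.isNodeLockserver(nodename)
        except Exception:
            return False

    class NonGulmModel:  # hypothetical model without GULM support
        def isNodeLockserver(self, name):
            raise Exception, 'not a GULM cluster'

    print gulm_lockserver_flag(NonGulmModel(), 'node1')  # False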



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-30 21:41 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2007-01-30 21:41 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2007-01-30 21:41:57

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix a typo: test svcname, not the undefined svc

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.217&r2=1.218

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/30 21:21:35	1.217
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/30 21:41:56	1.218
@@ -4729,7 +4729,7 @@
   except KeyError, e:
     svcname = None
   urlstring = baseurl + "?" + clustername + "&pagetype=29"
-  if svc != None:
+  if svcname != None:
     urlstring = urlstring + "&servicename=" + svcname
 
   map['formurl'] = urlstring



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-30 21:21 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2007-01-30 21:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2007-01-30 21:21:36

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix indentation errors (mixed tabs and spaces)

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.216&r2=1.217

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/30 21:05:15	1.216
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/30 21:21:35	1.217
@@ -4734,27 +4734,27 @@
 
   map['formurl'] = urlstring
 
-	try:
-		xenvmname = request['servicename']
-	except:
-		try:
-			xenvmname = request.form['servicename']
-		except:
-			luci_log.debug_verbose('servicename is missing from request')
-			return map
+  try:
+    xenvmname = request['servicename']
+  except:
+    try:
+      xenvmname = request.form['servicename']
+    except:
+      luci_log.debug_verbose('servicename is missing from request')
+      return map
 
-	try:
-		xenvm = model.retrieveXenVMsByName(xenvmname)
-	except:
-		luci_log.debug('An error occurred while attempting to get VM %s' \
-			% xenvmname)
-		return map
+  try:
+    xenvm = model.retrieveXenVMsByName(xenvmname)
+  except:
+    luci_log.debug('An error occurred while attempting to get VM %s' \
+    % xenvmname)
+    return map
 
-	attrs= xenvm.getAttributes()
+  attrs= xenvm.getAttributes()
   keys = attrs.keys()
   for key in keys:
     map[key] = attrs[key]
-	return map
+  return map
 
 def isClusterBusy(self, req):
   items = None
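
The damage being repaired is a tab-versus-space mix: the pasted block used tabs where the surrounding function used two-space indents, and Python 2 can parse such a mix into a different block structure than the author saw. The stdlib tabnanny module checks for exactly this ambiguity; a small self-contained run:

    import os
    import tabnanny
    import tempfile

    # One line indented with a tab, the next with eight spaces: the two
    # agree only at a tab size of 8, which is the ambiguity tabnanny hunts.
    src = 'if 1:\n\tprint "tab-indented"\n        print "space-indented"\n'

    path = tempfile.mktemp(suffix='.py')
    open(path, 'w').write(src)
    tabnanny.verbose = 1
    tabnanny.check(path)   # reports the whitespace-ambiguous line
    os.remove(path)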



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-30 21:05 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2007-01-30 21:05 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2007-01-30 21:05:17

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix for VM forms

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.215&r2=1.216

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/29 23:30:00	1.215
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/30 21:05:15	1.216
@@ -4720,6 +4720,20 @@
   setClusterConf(rc, stringbuf)
 
 def getXenVMInfo(self, model, request):
+  map = {}
+  baseurl = request['URL']
+  clustername = request['clustername']
+  svcname = None
+  try:
+    svcname = request['servicename']
+  except KeyError, e:
+    svcname = None
+  urlstring = baseurl + "?" + clustername + "&pagetype=29"
+  if svc != None:
+    urlstring = urlstring + "&servicename=" + svcname
+
+  map['formurl'] = urlstring
+
 	try:
 		xenvmname = request['servicename']
 	except:
@@ -4727,16 +4741,19 @@
 			xenvmname = request.form['servicename']
 		except:
 			luci_log.debug_verbose('servicename is missing from request')
-			return {}
+			return map
 
 	try:
 		xenvm = model.retrieveXenVMsByName(xenvmname)
 	except:
 		luci_log.debug('An error occurred while attempting to get VM %s' \
 			% xenvmname)
-		return {}
+		return map
 
-	map = xenvm.getAttributes()
+	attrs= xenvm.getAttributes()
+  keys = attrs.keys()
+  for key in keys:
+    map[key] = attrs[key]
 	return map
 
 def isClusterBusy(self, req):
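
Indentation aside (the 21:21 commit above cleans that up), the substantive fix is in the last hunk: 'map = xenvm.getAttributes()' rebound the name and discarded the formurl entry built just before it, while the key-by-key copy preserves it. The same idea, minus the form plumbing:

    def merge_attrs(map, attrs):
        # Copy attributes in rather than rebinding 'map', so entries
        # added earlier (formurl here) survive.
        for key in attrs.keys():
            map[key] = attrs[key]
        return map

    map = {'formurl': '/luci/cluster?clu1&pagetype=29&servicename=vm1'}
    print merge_attrs(map, {'name': 'vm1', 'memory': '512'})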



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-29 23:30 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-29 23:30 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-29 23:30:01

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 

Log message:
	allow external gulm lockservers to be removed in the GULM preferences tab

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.214&r2=1.215
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.47&r2=1.48

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/29 16:56:50	1.214
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/29 23:30:00	1.215
@@ -1147,7 +1147,10 @@
 	gulm_ptr = model.getGULMPtr()
 	if not gulm_ptr:
 		return (False, {'errors': [ 'This cluster appears not to be using GULM locking.' ]})
-	node_list = map(lambda x: x.getName(), model.getNodes())
+	node_list = map(lambda x: x.getName(), gulm_ptr.getChildren())
+	for i in map(lambda x: x.getName(), model.getNodes()):
+		if not i in node_list:
+			node_list.append(i)
 
 	gulm_lockservers = list()
 	for node in node_list:
@@ -3365,9 +3368,11 @@
     lockserv_list = list()
     clunodes = model.getNodes()
     gulm_lockservs = map(lambda x: x.getName(), gulm_ptr.getChildren())
+    lockserv_list = map(lambda x: (x, True), gulm_lockservs)
     for node in clunodes:
       n = node.getName()
-      lockserv_list.append((n, n in gulm_lockservs))
+      if not n in gulm_lockservs:
+        lockserv_list.append((n, False))
     clumap['gulm'] = True
     clumap['gulm_url'] = prop_baseurl + PROPERTIES_TAB + '=' + PROP_GULM_TAB
     clumap['gulm_lockservers'] = lockserv_list
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2007/01/29 22:06:02	1.47
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2007/01/29 23:30:00	1.48
@@ -571,7 +571,7 @@
 				if not prev_auth:
 					try:
 						rc.unauth()
-					except:
+					except Exception, e:
 						luci_log.debug_verbose('VAC4: %s: %s' % (cur_host, str(e)))
 
 				errors.append(err_msg)
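
The lockserver-list rework builds an ordered union: external GULM lockservers first (they need not be cluster nodes at all, which is why the old node-only listing hid them), then any cluster nodes not already present, each paired with a flag saying whether it currently serves locks. Condensed, in the patch's own map/lambda style:

    def lockserver_rows(lockservers, cluster_nodes):
        # Lockservers first (flag True), then remaining nodes (flag False).
        rows = map(lambda x: (x, True), lockservers)
        for n in cluster_nodes:
            if not n in lockservers:
                rows.append((n, False))
        return rows

    print lockserver_rows(['external-ls1', 'node1'], ['node1', 'node2'])
    # [('external-ls1', True), ('node1', True), ('node2', False)]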



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-26 19:35 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-26 19:35 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-26 19:35:00

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix a bug that caused nodes to be reported as using fence devices they are not actually using.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.212&r2=1.213

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/26 17:56:14	1.212
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/26 19:35:00	1.213
@@ -4558,13 +4558,16 @@
 
   #Get list of fence devices
   fds = model.getFenceDevices()
-  nodes_used = list() #This section determines which nodes use the dev
   for fd in fds:
+    #This section determines which nodes use the dev
     #create fencedev hashmap
+    nodes_used = list()
+
     if fd.isShared() == True:
       fencedev = {}
       attr_hash = fd.getAttributes()
       kees = attr_hash.keys()
+
       for kee in kees:
         fencedev[kee] = attr_hash[kee] #copy attrs over
       try:
@@ -4572,6 +4575,7 @@
       except:
         fencedev['unknown'] = True
         fencedev['pretty_name'] = fd.getAgentType()
+
       fencedev['agent'] = fd.getAgentType()
       #Add config url for this fencedev
       fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV
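
The whole bug is where nodes_used = list() sits: initialized once before the loop, the list accumulated across fence devices, so every device after the first was reported as used by all earlier devices' nodes as well as its own. Boiled down:

    usage = {'apc1': ['node1'], 'ilo1': ['node2']}  # hypothetical fence usage

    def report(reset_per_device):
        out = {}
        nodes_used = []
        for fd in ('apc1', 'ilo1'):
            if reset_per_device:
                nodes_used = []        # the fix: one list per device
            for node in usage[fd]:
                nodes_used.append(node)
            out[fd] = list(nodes_used)
        return out

    print report(False)  # {'apc1': ['node1'], 'ilo1': ['node1', 'node2']}
    print report(True)   # {'apc1': ['node1'], 'ilo1': ['node2']}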



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-18  2:48 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-18  2:48 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-18 02:48:37

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	add support for the new resource agents

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.204&r2=1.205

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/17 22:14:02	1.204
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/18 02:48:37	1.205
@@ -17,6 +17,11 @@
 from NFSExport import NFSExport
 from Service import Service
 from Netfs import Netfs
+from Apache import Apache
+from MySQL import MySQL
+from Postgres8 import Postgres8
+from Tomcat5 import Tomcat5
+from OpenLDAP import OpenLDAP
 from Vm import Vm
 from Script import Script
 from Samba import Samba
@@ -119,7 +124,7 @@
 
 	system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
 	add_cluster['nodes'] = system_list
-	
+
 	for i in system_list:
 		cur_system = system_list[i]
 
@@ -150,7 +155,7 @@
 			try:
 				if prev_auth:
 					messages.append('Host %s is already authenticated.' \
-						% cur_host) 
+						% cur_host)
 				else:
 					rc.auth(cur_passwd)
 
@@ -258,7 +263,7 @@
 		try:
 			rc = RicciCommunicator(i)
 			if not rc:
-				raise 'rc is None'
+				raise Exception, 'rc is None'
 		except Exception, e:
 			msg = 'Unable to connect to the ricci agent on %s: %s' % (i, str(e))
 			errors.append(msg)
@@ -327,7 +332,7 @@
 		except Exception, e:
 			luci_log.debug_verbose('VACN1: %s: %s' % (clusterName, str(e)))
 			return (False, { 'errors': [ 'The database object for %s is missing.' % clusterName ] })
-		
+
 		try:
 			cluster_os = cluster_folder.manage_getProperty('cluster_os')
 			if not cluster_os:
@@ -369,7 +374,7 @@
 
 	system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
 	add_cluster['nodes'] = system_list
-	
+
 	for i in system_list:
 		cur_system = system_list[i]
 
@@ -399,7 +404,7 @@
 			try:
 				if prev_auth:
 					messages.append('Host %s is already authenticated.' \
-						% cur_host) 
+						% cur_host)
 				else:
 					rc.auth(cur_passwd)
 
@@ -855,7 +860,7 @@
 		return (False, {'errors': errors})
 
 	return (True, {'messages': ['Resource added successfully']})
-	
+
 ## Cluster properties form validation routines
 
 # rhel5 cluster version
@@ -1215,7 +1220,7 @@
       raise Exception, 'cluster name from model.getClusterName() is blank'
   except Exception, e:
     luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
-    errors.append('Unable to determine cluster name from model') 
+    errors.append('Unable to determine cluster name from model')
 
   if len(errors) > 0:
     return (retcode, {'errors': errors, 'messages': messages})
@@ -1251,7 +1256,7 @@
   errors = list()
   messages = list()
   rc = None
-                                                                                
+
   try:
     model = request.SESSION.get('model')
     if not model:
@@ -1266,12 +1271,12 @@
       except:
         luci_log.debug_verbose('VFE: no model, no cluster name')
         return (False, {'errors': ['No cluster model was found.']})
-                                                                                
+
     try:
       model = getModelForCluster(self, cluname)
     except:
       model = None
-                                                                                
+
     if model is None:
       luci_log.debug_verbose('VFE: unable to get model from session')
       return (False, {'errors': ['No cluster model was found.']})
@@ -1312,7 +1317,7 @@
         raise Exception, 'cluster name from model.getClusterName() is blank'
     except Exception, e:
       luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
-      errors.append('Unable to determine cluster name from model') 
+      errors.append('Unable to determine cluster name from model')
 
     if not rc:
       rc = getRicciAgent(self, clustername)
@@ -1345,7 +1350,7 @@
   errors = list()
   messages = list()
   rc = None
-                                                                                
+
   try:
     model = request.SESSION.get('model')
     if not model:
@@ -1360,12 +1365,12 @@
       except:
         luci_log.debug_verbose('VFE: no model, no cluster name')
         return (False, {'errors': ['No cluster model was found.']})
-                                                                                
+
     try:
       model = getModelForCluster(self, cluname)
     except:
       model = None
-                                                                                
+
     if model is None:
       luci_log.debug_verbose('VFE: unable to get model from session')
       return (False, {'errors': ['No cluster model was found.']})
@@ -1409,7 +1414,7 @@
         raise Exception, 'cluster name from model.getClusterName() is blank'
     except Exception, e:
       luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
-      errors.append('Unable to determine cluster name from model') 
+      errors.append('Unable to determine cluster name from model')
 
     if not rc:
       rc = getRicciAgent(self, clustername)
@@ -1519,10 +1524,10 @@
 				break
 		if delete_target is not None:
 			try:
-				node.getChildren()[0].removeChild(l)
+				node.getChildren()[0].removeChild(delete_target)
 			except Exception, e:
 				luci_log.debug_verbose('vNFC6a: %s: %s' % (method_id, str(e)))
-				return (False, {'errors': ['An error occurred while deleting fence method %s' % method_id ]}) 
+				return (False, {'errors': ['An error occurred while deleting fence method %s' % method_id ]})
 		else:
 			return (True, {'messages': ['No changes were made.'] })
 
@@ -1563,7 +1568,7 @@
 				return (False, {'errors': [ 'Unable to determine what device the current instance uses.' ]})
 
 			try:
-				parent_form = form_hash[parent][1].append(dummy_form)
+				form_hash[parent][1].append(dummy_form)
 				del dummy_form['fence_instance']
 			except Exception, e:
 				luci_log.debug_verbose('vNFC10: no parent for instance')
@@ -1777,7 +1782,7 @@
   errors = list()
   messages = list()
   rc = None
-                                                                                
+
   try:
     model = request.SESSION.get('model')
     if not model:
@@ -1792,12 +1797,12 @@
       except:
         luci_log.debug_verbose('VFE: no model, no cluster name')
         return (False, {'errors': ['No cluster model was found.']})
-                                                                                
+
     try:
       model = getModelForCluster(self, cluname)
     except:
       model = None
-                                                                                
+
     if model is None:
       luci_log.debug_verbose('VFE: unable to get model from session')
       return (False, {'errors': ['No cluster model was found.']})
@@ -1844,12 +1849,12 @@
   except:
     error_code = FD_VAL_FAIL
     error_string = "Fence device %s could not be removed from configuration" % fencedev_name
- 
+
   try:
     model.removeFenceInstancesForFenceDevice(fencedev_name)
   except:
     luci_log.debug_verbose('VFD: Could not remove fence instances for')
-     
+
 
   if error_code == FD_VAL_SUCCESS:
     messages.append(error_string)
@@ -1871,7 +1876,7 @@
         raise Exception, 'cluster name from model.getClusterName() is blank'
     except Exception, e:
       luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
-      errors.append('Unable to determine cluster name from model') 
+      errors.append('Unable to determine cluster name from model')
 
     if not rc:
       rc = getRicciAgent(self, clustername)
@@ -1960,7 +1965,7 @@
 		luci_log.debug_verbose('VDP5: RC %s: %s' % (nodename_resolved, str(e)))
 		errors.append('Unable to connect to the ricci agent on %s to update cluster daemon properties' % nodename_resolved)
 		return (False, {'errors': errors})
-		
+
 	batch_id, result = updateServices(rc, enable_list, disable_list)
 	if batch_id is None or result is None:
 		luci_log.debug_verbose('VDP6: setCluserConf: batchid or result is None')
@@ -2716,7 +2721,7 @@
 		except Exception, e:
 			luci_log.debug_verbose('GRA4b: %s' % str(e))
 			cur_alias = None
-			
+
 		if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
 			try:
 				luci_log.debug('GRA5: %s reports it\'s in cluster %s:%s; we expect %s' \
@@ -3176,7 +3181,7 @@
 	if batch_number is None or result is None:
 		luci_log.debug_verbose('serviceRestart2: %s failed' % svcname)
 		return None
-				
+
 	try:
 		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_RESTART, "Restarting service \'%s\'" % svcname)
 	except Exception, e:
@@ -4174,8 +4179,7 @@
         except:
           luci_log.debug_verbose('GNI0: unable to determine cluster name')
           return {}
-      
- 
+
   for item in nodelist:
     map = {}
     name = item['name']
@@ -4285,7 +4289,7 @@
               clustername = model.getClusterName()
               node_hash = {}
               node_hash['nodename'] = node.getName().strip()
-              node_hash['nodeurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
+              node_hash['nodeurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE
               nodes_used.append(node_hash)
 
       map['nodesused'] = nodes_used
@@ -4299,7 +4303,7 @@
       return fd
 
   raise
-  
+
 def getFenceInfo(self, model, request):
   if not model:
     luci_log.debug_verbose('getFenceInfo00: model is None')
@@ -4342,12 +4346,12 @@
       luci_log.debug_verbose('getFenceInfo2: unable to extract nodename: %s' \
           % str(e))
       return {}
-    
+
   #Here we need to get fences for a node - just the first two levels
   #Each level has its own list of fence devs used in that level
   #For each fence dev, a list of instance structs is appended
   #In addition, for each level, a list of available but unused fence devs
-  #is returned. 
+  #is returned.
   try:
     node = model.retrieveNodeByName(nodename)
   except GeneralError, e:
@@ -4396,7 +4400,7 @@
             if kee == "name":
               continue #Don't duplicate name attr
             fencedev[kee] = kidattrs[kee]
-          #This fencedev struct is complete, and needs to be placed on the 
+          #This fencedev struct is complete, and needs to be placed on the
           #level1 Q. Because it is non-shared, we should set last_kid_fd
           #to none.
           last_kid_fd = None
@@ -4425,7 +4429,7 @@
               fencedev['unknown'] = True
               fencedev['prettyname'] = fd.getAgentType()
             fencedev['isShared'] = True
-            fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV 
+            fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV
             fencedev['id'] = str(major_num)
             major_num = major_num + 1
             inlist = list()
@@ -4441,7 +4445,7 @@
               if kee == "name":
                 continue
               instance_struct[kee] = kidattrs[kee]
-            inlist.append(instance_struct) 
+            inlist.append(instance_struct)
             level1.append(fencedev)
             last_kid_fd = fencedev
             continue
@@ -4503,7 +4507,7 @@
             if kee == "name":
               continue #Don't duplicate name attr
             fencedev[kee] = kidattrs[kee]
-          #This fencedev struct is complete, and needs to be placed on the 
+          #This fencedev struct is complete, and needs to be placed on the
           #level2 Q. Because it is non-shared, we should set last_kid_fd
           #to none.
           last_kid_fd = None
@@ -4532,7 +4536,7 @@
               fencedev['unknown'] = True
               fencedev['prettyname'] = fd.getAgentType()
             fencedev['isShared'] = True
-            fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV 
+            fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV
             fencedev['id'] = str(major_num)
             major_num = major_num + 1
             inlist = list()
@@ -4548,7 +4552,7 @@
               if kee == "name":
                 continue
               instance_struct[kee] = kidattrs[kee]
-            inlist.append(instance_struct) 
+            inlist.append(instance_struct)
             level2.append(fencedev)
             last_kid_fd = fencedev
             continue
@@ -4576,8 +4580,8 @@
         shared2.append(shared_struct)
     map['shared2'] = shared2
 
-  return map    
-      
+  return map
+
 def getFencesInfo(self, model, request):
   map = {}
   if not model:
@@ -4625,7 +4629,7 @@
                 continue
               node_hash = {}
               node_hash['nodename'] = node.getName().strip()
-              node_hash['nodeurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
+              node_hash['nodeurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE
               nodes_used.append(node_hash)
 
       fencedev['nodesused'] = nodes_used
@@ -4634,7 +4638,6 @@
   map['fencedevs'] = fencedevs
   return map
 
-    
 def getLogsForNode(self, request):
 	try:
 		nodename = request['nodename']
@@ -4700,7 +4703,7 @@
     xenvmname = req['servicename']
   except KeyError, e:
     isNew = True
-  
+
   if isNew == True:
     xvm = Vm()
     xvm.addAttribute("name", req.form['xenvmname'])
@@ -4829,7 +4832,7 @@
       node_report = {}
       node_report['isnodecreation'] = True
       node_report['iserror'] = False  #Default value
-      node_report['desc'] = item[1].getProperty(FLAG_DESC) 
+      node_report['desc'] = item[1].getProperty(FLAG_DESC)
       batch_xml = None
       ricci = item[0].split("____") #This removes the 'flag' suffix
 
@@ -4885,7 +4888,7 @@
       if batch_xml is None:  #The job is done and gone from queue
         if redirect_message == False: #We have not displayed this message yet
           node_report['desc'] = REDIRECT_MSG
-          node_report['iserror'] = True 
+          node_report['iserror'] = True
           node_report['errormessage'] = ""
           nodereports.append(node_report)
           redirect_message = True
@@ -4984,7 +4987,7 @@
             luci_log.debug_verbose('ICB16: last_status err: %s %d: %s' \
               % (item[0], creation_status, str(e)))
           continue
-          
+
     else:
       node_report = {}
       node_report['isnodecreation'] = False
@@ -5306,696 +5309,1464 @@
 	response.redirect(request['URL'] + "?pagetype=" + RESOURCES + "&clustername=" + clustername + '&busyfirst=true')
 
 def addIp(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addIp error: form is missing')
+		luci_log.debug_verbose('addIp0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addIp error: model is missing')
+		luci_log.debug_verbose('addIp1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
+				raise Exception, 'oldname is blank.'
+
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No IP resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('addIp error: %s' % str(e))
-			return None
+			errors.append('No original name was found for this IP resource.')
 	else:
 		try:
 			res = Ip()
 			if not res:
-				raise Exception, 'apply(Ip) is None'
+				raise Exception, 'res is None'
 		except Exception, e:
-			luci_log.debug_verbose('addIp error: %s' % str(e))
-			return None
+			errors.append('An error occurred while creating an IP resource.')
+			luci_log.debug_verbose('addIp3: %s' % str(e))
 
 	if not res:
-		luci_log.debug_verbose('addIp error: res is none')
-		return None
+		return [None, None, errors]
 
-	errors = list()
 	try:
 		addr = form['ip_address'].strip()
 		if not addr:
 			raise KeyError, 'ip_address is blank'
 		# XXX: validate IP addr
-		res.attr_hash['address'] = addr
+		res.addAttribute('address', addr)
 	except KeyError, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addIp error: %s' % err)
+		luci_log.debug_verbose('addIp4: %s' % err)
 
 	if 'monitorLink' in form:
-		res.attr_hash['monitor_link'] = '1'
+		res.addAttribute('monitor_link', '1')
 	else:
-		res.attr_hash['monitor_link'] = '0'
+		res.addAttribute('monitor_link', '0')
 
 	if len(errors) > 1:
 		return [None, None, errors]
 	return [res, model, None]
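
A note on the contract above: each of these add* handlers returns None when the form or session model is missing entirely, and otherwise a three-element list [resource, model, errors]. One oddity worth flagging: the success guard tests len(errors) > 1, so a result carrying exactly one validation error is still returned as a success; a stricter guard would test len(errors) > 0. A hypothetical caller (the function name and messages are illustrative, not part of the patch):

    def handle_add_ip(request):
        # Consume the [res, model, errors] triple produced by addIp().
        ret = addIp(request)
        if ret is None:
            # Hard failure: the form or the session model was missing.
            return 'The request had no form data or cluster model.'
        res, model, errors = ret
        if res is None:
            # Field validation failed; errors holds the messages.
            return 'Errors: %s' % ', '.join(errors)
        # Success: res is ready to be attached to the model.
        return 'Validated a new IP resource.'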
 
 def addFs(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addFs error: form is missing')
+		luci_log.debug_verbose('addFs0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addFs error: model is missing')
+		luci_log.debug_verbose('addFs1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
+				raise Exception, 'oldname is blank.'
+
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No filesystem resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('addFs error: %s' % str(e))
-			return None
+			errors.append('No original name was found for this filesystem resource.')
+			luci_log.debug_verbose('addFs3: %s' % str(e))
 	else:
 		try:
 			res = Fs()
 			if not res:
-				raise Exception, 'apply(Fs) is None'
+				raise Exception, 'res is None'
 		except Exception, e:
-			luci_log.debug_verbose('addFs error: %s' % str(e))
-			return None
+			errors.append('An error occurred while creating a filesystem resource.')
+			luci_log.debug_verbose('addFs4: %s' % str(e))
 
 	if not res:
-		luci_log.debug_verbose('addFs error: fs obj was not created')
-		return None
+		return [None, None, errors]
 
 	# XXX: sanity check these fields
-	errors = list()
 	try:
 		name = form['resourceName'].strip()
-		res.attr_hash['name'] = name
+		if not name:
+			raise Exception, 'No name was given for this filesystem resource.'
+		res.addAttribute('name', name)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs error: %s' % err)
+		luci_log.debug_verbose('addFs5: %s' % err)
 
 	try:
 		mountpoint = form['mountpoint'].strip()
-		res.attr_hash['mountpoint'] = mountpoint
+		if not mountpoint:
+			raise Exception, 'No mount point was given for this filesystem resource.'
+		res.addAttribute('mountpoint', mountpoint)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs error: %s' % err)
+		luci_log.debug_verbose('addFs6: %s' % err)
 
 	try:
 		device = form['device'].strip()
-		res.attr_hash['device'] = device
+		if not device:
+			raise Exception, 'No device was given for this filesystem resource.'
+		res.addAttribute('device', device)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs error: %s' % err)
+		luci_log.debug_verbose('addFs7: %s' % err)
 
 	try:
 		options = form['options'].strip()
-		res.attr_hash['options'] = options
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs error: %s' % err)
+		luci_log.debug_verbose('addFs8: %s' % err)
 
 	try:
 		fstype = form['fstype'].strip()
-		res.attr_hash['fstype'] = fstype
+		if not fstype:
+			raise Exception, 'No filesystem type was given for this filesystem resource.'
+		res.addAttribute('fstype', fstype)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs error: %s' % err)
+		luci_log.debug_verbose('addFs9: %s' % err)
 
 	try:
 		fsid = form['fsid'].strip()
-		res.attr_hash['fsid'] = fsid
+		if not fsid:
+			raise Exception, 'No filesystem ID was given for this filesystem resource.'
+		res.addAttribute('fsid', fsid)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs error: %s' % err)
+		luci_log.debug_verbose('addFs10: %s' % err)
 
 	if form.has_key('forceunmount'):
-		res.attr_hash['force_unmount'] = '1'
+		res.addAttribute('force_unmount', '1')
 	else:
-		res.attr_hash['force_unmount'] = '0'
+		res.addAttribute('force_unmount', '0')
 
 	if form.has_key('selffence'):
-		res.attr_hash['self_fence'] = '1'
+		res.addAttribute('self_fence', '1')
 	else:
-		res.attr_hash['self_fence'] = '0'
+		res.addAttribute('self_fence', '0')
 
 	if form.has_key('checkfs'):
-		res.attr_hash['force_fsck'] = '1'
+		res.addAttribute('force_fsck', '1')
 	else:
-		res.attr_hash['force_fsck'] = '0'
+		res.addAttribute('force_fsck', '0')
 
 	if len(errors) > 1:
 		return [None, None, errors]
 	return [res, model, None]
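
The 'options' handling above is the one optional field in this handler: a blank or missing value removes the attribute rather than storing an empty string. That set-or-clear idiom repeats in most of the handlers below; a helper capturing it might look like this (a sketch only; the patch inlines the logic each time):

    def set_optional_attribute(res, form, form_key, attr_name):
        # Store the attribute only when the field is present and
        # non-blank; otherwise clear any stale value.
        try:
            value = form[form_key].strip()
        except KeyError:
            value = ''
        if value:
            res.addAttribute(attr_name, value)
        else:
            try:
                res.removeAttribute(attr_name)
            except:
                pass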
 
 def addGfs(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addGfs error: form is missing')
+		luci_log.debug_verbose('addGfs0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addGfs error: model is missing')
+		luci_log.debug_verbose('addGfs1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
-			if not res:
-				luci_log.debug('resource %s was not found for editing' % oldname)
-				return None
+				raise Exception, 'oldname is blank'
+
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No filesystem resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug('resource %s was not found for editing: %s' \
-				% (oldname, str(e)))
-			return None
+			errors.append('No original name was found for this cluster filesystem resource.')
+			luci_log.debug_verbose('addGfs2: %s' % str(e))
 	else:
 		try:
 			res = Clusterfs()
 			if not res:
-				raise Exception, 'apply(Clusterfs) is None'
+				raise Exception, 'res is None'
 		except Exception, e:
-			luci_log.debug('addGfs error: %s' % str(e))
-			return None
-		except:
-			luci_log.debug('addGfs error')
-			return None
+			errors.append('An error occurred while creating a cluster filesystem resource.')
+			luci_log.debug('addGfs3: %s' % str(e))
+
+	if not res:
+		return [None, None, errors]
 
 	# XXX: sanity check these fields
-	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise KeyError, 'resourceName is blank'
-		res.attr_hash['name'] = name
+			raise Exception, 'No name was given for this cluster filesystem resource.'
+		res.addAttribute('name', name)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs error: %s' % err)
+		luci_log.debug_verbose('addGfs4: %s' % err)
 
 	try:
 		mountpoint = form['mountpoint'].strip()
-		res.attr_hash['mountpoint'] = mountpoint
+		if not mountpoint:
+			raise Exception, 'No mount point was given for this cluster filesystem resource.'
+		res.addAttribute('mountpoint', mountpoint)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs error: %s' % err)
+		luci_log.debug_verbose('addGfs5: %s' % err)
 
 	try:
 		device = form['device'].strip()
-		res.attr_hash['device'] = device
+		if not device:
+			raise Exception, 'No device was given for this cluster filesystem resource.'
+		res.addAttribute('device', device)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs error: %s' % err)
+		luci_log.debug_verbose('addGfs6: %s' % err)
 
 	try:
 		options = form['options'].strip()
-		res.attr_hash['options'] = options
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs error: %s' % err)
+		luci_log.debug_verbose('addGfs7: %s' % err)
 
 	try:
 		fsid = form['fsid'].strip()
-		res.attr_hash['fsid'] = fsid
+		if not fsid:
+			raise Exception, 'No filesystem ID was given for this cluster filesystem resource.'
+		res.addAttribute('fsid', fsid)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs error: %s' % err)
+		luci_log.debug_verbose('addGfs8: %s' % err)
 
 	if form.has_key('forceunmount'):
-		res.attr_hash['force_unmount'] = '1'
+		res.addAttribute('force_unmount', '1')
 	else:
-		res.attr_hash['force_unmount'] = '0'
+		res.addAttribute('force_unmount', '0')
 
 	if len(errors) > 1:
 		return [None, None, errors]
+
 	return [res, model, None]
 
 def addNfsm(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addNfsm error: form is missing')
+		luci_log.debug_verbose('addNfsm0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addNfsm error: model is missing')
+		luci_log.debug_verbose('addNfsm1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
+				raise Exception, 'oldname is blank'
+
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No NFS mount resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('addNfsm error: %s' % str(e))
-			return None
+			errors.append('No original name was found for this NFS mount resource.')
+			luci_log.debug_verbose('addNfsm2: %s' % str(e))
 	else:
 		try:
 			res = Netfs()
+			if not res:
+				raise Exception, 'res is None'
 		except Exception, e:
-			luci_log.debug_verbose('addNfsm error: %s' % str(e))
-			return None
+			errors.append('An error occurred while creating an NFS mount resource.')
+			luci_log.debug_verbose('addNfsm3: %s' % str(e))
 
 	if not res:
-		return None
+		return [None, None, errors]
 
 	# XXX: sanity check these fields
-	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise KeyError, 'resourceName is blank'
-		res.attr_hash['name'] = name
+			raise Exception, 'No name was given for this NFS mount resource.'
+		res.addAttribute('name', name)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm error: %s' % err)
+		luci_log.debug_verbose('addNfsm4: %s' % err)
 
 	try:
 		mountpoint = form['mountpoint'].strip()
-		res.attr_hash['mountpoint'] = mountpoint
+		if not mountpoint:
+			raise Exception, 'No mount point was given for this NFS mount resource.'
+		res.addAttribute('mountpoint', mountpoint)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm error: %s' % err)
-		
+		luci_log.debug_verbose('addNfsm5: %s' % err)
+
 	try:
 		host = form['host'].strip()
-		res.attr_hash['host'] = host
+		if not host:
+			raise Exception, 'No host server was given for this NFS mount resource.'
+		res.addAttribute('host', host)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm error: %s' % err)
+		luci_log.debug_verbose('addNfsm6: %s' % err)
 
 	try:
 		options = form['options'].strip()
-		res.attr_hash['options'] = options
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm error: %s' % err)
+		luci_log.debug_verbose('addNfsm7: %s' % err)
 
 	try:
 		exportpath = form['exportpath'].strip()
-		res.attr_hash['exportpath'] = exportpath 
+		if not exportpath:
+			raise Exception, 'No export path was given for this NFS mount resource.'
+		res.addAttribute('exportpath', exportpath)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm error: %s' % err)
+		luci_log.debug_verbose('addNfsm8: %s' % err)
 
 	try:
 		nfstype = form['nfstype'].strip().lower()
 		if nfstype != 'nfs' and nfstype != 'nfs4':
-			raise KeyError, 'invalid nfs type: %s' % nfstype
-		res.attr_hash['nfstype'] = nfstype
+			raise Exception, 'An invalid NFS version \"%s\" was given.' % nfstype
+		res.addAttribute('nfstype', nfstype)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm error: %s' % err)
+		luci_log.debug_verbose('addNfsm9: %s' % err)
 
 	if form.has_key('forceunmount'):
-		res.attr_hash['force_unmount'] = '1'
+		res.addAttribute('force_unmount', '1')
 	else:
-		res.attr_hash['force_unmount'] = '0'
+		res.addAttribute('force_unmount', '0')
 
 	if len(errors) > 1:
 		return [None, None, errors]
 	return [res, model, None]
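
The nfstype check above is a whitelist: normalize the input, then accept only known protocol versions. Written against a tuple of allowed values, it extends more easily (a sketch, not part of the patch):

    ALLOWED_NFS_TYPES = ('nfs', 'nfs4')

    def validate_nfstype(value):
        # Accept only known NFS protocol versions.
        nfstype = value.strip().lower()
        if nfstype not in ALLOWED_NFS_TYPES:
            raise ValueError('An invalid NFS version "%s" was given.' % nfstype)
        return nfstype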
 
 def addNfsc(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addNfsc error: form is missing')
+		luci_log.debug_verbose('addNfsc0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addNfsc error: model is missing')
+		luci_log.debug_verbose('addNfsc1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
+				raise Exception, 'oldname is blank'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No NFS client resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('addNfsc error: %s' % str(e))
-			return None
+			errors.append('No original name was found for this NFS client resource.')
+			luci_log.debug_verbose('addNfsc2: %s' % str(e))
 	else:
 		try:
 			res = NFSClient()
-		except:
-			luci_log.debug_verbose('addNfsc error: %s' % str(e))
-			return None
+			if not res:
+				raise Exception, 'res is None'
+		except Exception, e:
+			errors.append('An error occurred while creating an NFS client resource.')
+			luci_log.debug_verbose('addNfsc3: %s' % str(e))
 
 	if not res:
-		luci_log.debug_verbose('addNfsc error: res is none')
-		return None
+		return [None, None, errors]
 
-	errors = list()
+	# XXX: sanity check these fields
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise KeyError, 'resourceName is blank'
-		res.attr_hash['name'] = name
+			raise Exception, 'No name was given for this NFS client resource.'
+		res.addAttribute('name', name)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsc error: %s' % err)
+		luci_log.debug_verbose('addNfsc4: %s' % err)
 
 	try:
 		target = form['target'].strip()
-		res.attr_hash['target'] = target 
+		if not target:
+			raise Exception, 'No target was given for this NFS client resource.'
+		res.addAttribute('target', target)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsc error: %s' % err)
+		luci_log.debug_verbose('addNfsc5: %s' % err)
 
 	try:
 		options = form['options'].strip()
-		res.attr_hash['options'] = options
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsc error: %s' % err)
+		luci_log.debug_verbose('addNfsc6: %s' % err)
+
+	if form.has_key('allow_recover'):
+		res.addAttribute('allow_recover', '1')
+	else:
+		res.addAttribute('allow_recover', '0')
 
 	if len(errors) > 1:
 		return [None, None, errors]
 	return [res, model, None]
 
 def addNfsx(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addNfsx error: model is missing')
+		luci_log.debug_verbose('addNfsx0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addNfsx error: model is missing')
+		luci_log.debug_verbose('addNfsx1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
+				raise Exception, 'oldname is blank'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No NFS export resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('addNfsx error: %s', str(e))
-			return None
+			errors.append('No original name was found for this NFS export resource.')
+			luci_log.debug_verbose('addNfsx2: %s' % str(e))
 	else:
 		try:
 			res = NFSExport()
-		except:
-			luci_log.debug_verbose('addNfsx error: %s', str(e))
-			return None
+			if not res:
+				raise Exception, 'res is None'
+		except Exception, e:
+			errors.append('An error occurred while creating an NFS export resource.')
+			luci_log.debug_verbose('addNfsx3: %s' % str(e))
 
 	if not res:
-		luci_log.debug_verbose('addNfsx error: res is None')
-		return None
+		return [None, None, errors]
 
-	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise KeyError, 'resourceName is blank'
-		res.attr_hash['name'] = name
+			raise Exception, 'No name was given for this NFS export resource.'
+		res.addAttribute('name', name)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsx error: %s', err)
+		luci_log.debug_verbose('addNfsx4: %s' % err)
 
 	if len(errors) > 1:
 		return [None, None, errors]
 	return [res, model, None]
 
 def addScr(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addScr error: form is missing')
+		luci_log.debug_verbose('addScr0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addScr error: model is missing')
+		luci_log.debug_verbose('addScr1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
+				raise Exception, 'oldname is blank'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No script resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('addScr error: %s' % str(e))
-			return None
+			errors.append('No original name was found for this script resource.')
+			luci_log.debug_verbose('addScr2: %s' % str(e))
 	else:
 		try:
 			res = Script()
+			if not res:
+				raise Exception, 'res is None'
 		except Exception, e:
-			luci_log.debug_verbose('addScr error: %s' % str(e))
-			return None
+			errors.append('An error occurred while creating a script resource.')
+			luci_log.debug_verbose('addScr3: %s' % str(e))
 
 	if not res:
-		luci_log.debug_verbose('addScr error: res is None')
-		return None
+		return [None, None, errors]
 
-	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise KeyError, 'resourceName is blank'
-		res.attr_hash['name'] = name
+			raise Exception, 'No name was given for this script resource.'
+		res.addAttribute('name', name)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addScr error: %s' % err)
+		luci_log.debug_verbose('addScr4: %s' % err)
 
 	try:
 		path = form['file'].strip()
 		if not path:
-			raise KeyError, 'file path is blank'
-		res.attr_hash['file'] = path
+			raise Exception, 'No path to a script file was given for this script resource.'
+		res.addAttribute('file', path)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addScr error: %s' % err)
+		luci_log.debug_verbose('addScr5: %s' % err)
 
 	if len(errors) > 1:
 		return [None, None, errors]
 	return [res, model, None]
 
 def addSmb(request, form=None):
+	errors = list()
+
 	if form is None:
 		form = request.form
 
 	if not form:
-		luci_log.debug_verbose('addSmb error: form is missing')
+		luci_log.debug_verbose('addSmb0: form is missing')
 		return None
 
 	model = request.SESSION.get('model')
 	if not model:
-		luci_log.debug_verbose('addSmb error: model is missing')
+		luci_log.debug_verbose('addSmb1: model is missing')
 		return None
 
+	res = None
 	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
-				raise KeyError, 'oldname is blank.'
-			res = getResourceForEdit(model, oldname)
+				raise Exception, 'oldname is blank'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e1:
+				errors.append('No Samba resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('addSmb error: %s' % str(e))
-			return None
+			errors.append('No original name was found for this Samba resource.')
+			luci_log.debug_verbose('addSmb2: %s' % str(e))
 	else:
 		try:
 			res = Samba()
+			if not res:
+				raise Exception, 'res is None'
 		except Exception, e:
-			luci_log.debug_verbose('addSmb error: %s' % str(e))
-			return None
+			errors.append('An error occurred while creating a Samba resource.')
+			luci_log.debug_verbose('addSmb3: %s' % str(e))
 
 	if not res:
-		luci_log.debug_verbose('addSmb error: res is None')
-		return None
+		return [None, None, errors]
 
-	errors = list()
 	try:
 		name = form['resourceName'].strip()
 		if not name:
-			raise KeyError, 'resourceName is blank'
-		res.attr_hash['name'] = name
+			raise Exception, 'No name was given for this Samba resource.'
+		res.addAttribute('name', name)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addSmb error: %s' % err)
+		luci_log.debug_verbose('addSmb4: %s' % err)
 
 	try:
 		workgroup = form['workgroup'].strip()
-		res.attr_hash['workgroup'] = workgroup
+		if not workgroup:
+			raise Exception, 'No workgroup was given for this Samba resource.'
+		res.addAttribute('workgroup', workgroup)
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
-		luci_log.debug_verbose('addSmb error: %s' % err)
+		luci_log.debug_verbose('addSmb5: %s' % err)
 
 	if len(errors) > 1:
 		return [None, None, errors]
 	return [res, model, None]
 
-resourceAddHandler = {
-	'ip': addIp,
-	'fs': addFs,
-	'gfs': addGfs,
-	'nfsm': addNfsm,
-	'nfsx': addNfsx,
-	'nfsc': addNfsc,
-	'scr': addScr,
-	'smb': addSmb
-}
-
-def resolveClusterChanges(self, clusterName, model):
-	try:
-		mb_nodes = model.getNodes()
-		if not mb_nodes or not len(mb_nodes):
-			raise Exception, 'node list is empty'
-	except Exception, e:
-		luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %s' \
-				% (str(e), clusterName))
-		return 'Unable to find cluster nodes for %s' % clusterName
-
-	try:
-		cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
-		if not cluster_node:
-			raise Exception, 'cluster node is none'
-	except Exception, e:
-		luci_log.debug('RCC1: cant find cluster node for %s: %s'
-			% (clusterName, str(e)))
-		return 'Unable to find an entry for %s in the Luci database.' % clusterName
-
-	try:
-		db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
-		if not db_nodes or not len(db_nodes):
-			raise Exception, 'no database nodes'
-	except Exception, e:
-		# Should we just create them all? Can this even happen?
-		luci_log.debug('RCC2: error: %s' % str(e))
-		return 'Unable to find database entries for any nodes in %s' % clusterName
+def addApache(request, form=None):
+	errors = list()
 
-	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
+	if form is None:
+		form = request.form
 
-	# this is a really great algorithm.
-	missing_list = list()
-	new_list = list()
-	for i in mb_nodes:
-		for j in db_nodes:
-			f = 0
-			if same_host(i, j):
-				f = 1
-				break
-		if not f:
-			new_list.append(i)
+	if not form:
+		luci_log.debug_verbose('addApache0: form is missing')
+		return None
 
-	for i in db_nodes:
-		for j in mb_nodes:
-			f = 0
-			if same_host(i, j):
-				f = 1
-				break
-		if not f:
-			missing_list.append(i)
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addApache1: model is missing')
+		return None
 
-	messages = list()
-	for i in missing_list:
+	res = None
+	if form.has_key('edit'):
 		try:
-			## or alternately
-			##new_node = cluster_node.restrictedTraverse(i)
-			##setNodeFlag(self, new_node, CLUSTER_NODE_NOT_MEMBER)
-			cluster_node.delObjects([i])
-			messages.append('Node \"%s\" is no longer in a member of cluster \"%s\." It has been deleted from the management interface for this cluster.' % (i, clusterName))
-			luci_log.debug_verbose('VCC3: deleted node %s' % i)
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise Exception, 'oldname is blank.'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e:
+				errors.append('No Apache resource named \"%s\" exists.' % oldname)
 		except Exception, e:
-			luci_log.debug_verbose('VCC4: delObjects: %s: %s' % (i, str(e)))
-
-	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
-	for i in new_list:
+			errors.append('No original name was found for this Apache resource.')
+			luci_log.debug_verbose('addApache2: %s' % str(e))
+	else:
 		try:
-			cluster_node.manage_addFolder(i, '__luci__:csystem:' + clusterName)
-			new_node = cluster_node.restrictedTraverse(i)
-			setNodeFlag(self, new_node, new_flags)
-			messages.append('A new cluster node, \"%s,\" is now a member of cluster \"%s.\" It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.' % (i, clusterName))
+			res = Apache()
+			if not res:
+				raise Exception, 'could not create Apache object'
 		except Exception, e:
-			messages.append('A new cluster node, \"%s,\" is now a member of cluster \"%s,\". but it has not been added to the management interface for this cluster as a result of an error creating a database entry for it.' % (i, clusterName))
-			luci_log.debug_verbose('VCC5: addFolder: %s/%s: %s' \
-				% (clusterName, i, str(e)))
-	
-	return messages
+			errors.append('An error occurred while creating an Apache resource.')
+			luci_log.debug_verbose('addApache3: %s' % str(e))
 
-def addResource(self, request, model, res, res_type):
-	clustername = model.getClusterName()
-	if not clustername:
-		luci_log.debug_verbose('addResource0: no cluname from mb')
-		return 'Unable to determine cluster name'
+	if not res:
+		return [None, None, errors]
 
-	rc = getRicciAgent(self, clustername)
-	if not rc:
-		luci_log.debug_verbose('addResource1: unable to find a ricci agent for cluster %s' % clustername)
-		return 'Unable to find a ricci agent for the %s cluster' % clustername
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise Exception, 'No name was given for this Apache resource.'
+		res.addAttribute('name', name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addApache4: %s' % err)
 
 	try:
-		model.getResourcesPtr().addChild(res)
+		server_root = form['server_root'].strip()
+		if not server_root:
+			raise KeyError, 'No server root was given for this Apache resource.'
+		res.addAttribute('server_root', server_root)
 	except Exception, e:
-		luci_log.debug_verbose('addResource2: adding the new resource failed: %s' % str(e))
-		return 'Unable to add the new resource'
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addApache5: %s' % err)
 
 	try:
-		cp = model.getClusterPtr()
-		cp.incrementConfigVersion()
-		model.setModified(True)
-		conf = model.exportModelAsString()
-		if not conf:
-			raise Exception, 'model string for %s is blank' % clustername
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the Apache configuration file was given.'
+		res.addAttribute('config_file', config_file)
 	except Exception, e:
-		luci_log.debug_verbose('addResource3: exportModelAsString : %s' \
-			% str(e))
-		return 'An error occurred while adding this resource'
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addApache6: %s' % err)
 
 	try:
-		ragent = rc.hostname()
+		options = form['httpd_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('httpd_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('httpd_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addApache7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', shutdown_wait)
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', 0)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addApache8: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, model, None]
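
The shutdown_wait handling above distinguishes a missing field (KeyError: default to 0) from a present but non-numeric one (int() raising, recorded as an error). The same policy as a standalone helper (a sketch under the same form semantics):

    def parse_shutdown_wait(form, errors):
        # Missing field: default to 0. Malformed field: record the
        # error and signal failure with None.
        try:
            return int(form['shutdown_wait'].strip())
        except KeyError:
            return 0
        except Exception, e:
            errors.append(str(e))
            return None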
+
+def addMySQL(request, form=None):
+	errors = list()
+
+	if form is None:
+		form = request.form
+
+	if not form:
+		luci_log.debug_verbose('addMySQL0: form is missing')
+		return None
+
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addMySQL1: model is missing')
+		return None
+
+	res = None
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise Exception, 'oldname is blank.'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e:
+				errors.append('No MySQL resource named \"%s\" exists.' % oldname)
+		except Exception, e:
+			errors.append('No original name was found for this MySQL resource.')
+			luci_log.debug_verbose('addMySQL2: %s' % str(e))
+	else:
+		try:
+			res = MySQL()
+			if not res:
+				raise Exception, 'could not create MySQL object'
+		except Exception, e:
+			errors.append('An error occurred while creating a MySQL resource.')
+			luci_log.debug_verbose('addMySQL3: %s' % str(e))
+
+	if not res:
+		return [None, None, errors]
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise Exception, 'No name was given for this MySQL resource.'
+		res.addAttribute('name', name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addMySQL4: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the MySQL configuration file was given.'
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addMySQL5: %s' % err)
+
+	try:
+		listen_addr = form['listen_address'].strip()
+		if not listen_addr:
+			raise KeyError, 'No address was given for the MySQL server to listen on.'
+		res.addAttribute('listen_address', listen_addr)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addMySQL6: %s' % err)
+
+	try:
+		options = form['mysql_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('mysql_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('mysql_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addMySQL7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', shutdown_wait)
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', 0)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addMySQL8: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, model, None]
+
+def addOpenLDAP(request, form=None):
+	errors = list()
+
+	if form is None:
+		form = request.form
+
+	if not form:
+		luci_log.debug_verbose('addOpenLDAP0: form is missing')
+		return None
+
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addOpenLDAP1: model is missing')
+		return None
+
+	res = None
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise Exception, 'oldname is blank.'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e:
+				errors.append('No OpenLDAP resource named \"%s\" exists.' % oldname)
+		except Exception, e:
+			errors.append('No original name was found for this OpenLDAP resource.')
+			luci_log.debug_verbose('addOpenLDAP2: %s' % str(e))
+	else:
+		try:
+			res = OpenLDAP()
+			if not res:
+				raise Exception, 'could not create OpenLDAP object'
+		except Exception, e:
+			errors.append('An error occurred while creating an OpenLDAP resource.')
+			luci_log.debug_verbose('addOpenLDAP3: %s' % str(e))
+
+	if not res:
+		return [None, None, errors]
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise Exception, 'No name was given for this OpenLDAP resource.'
+		res.addAttribute('name', name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addOpenLDAP4: %s' % err)
+
+	try:
+		url_list = form['url_list'].strip()
+		if not url_list:
+			raise KeyError, 'No URL list was given for this OpenLDAP resource.'
+		res.addAttribute('url_list', url_list)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addOpenLDAP5: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the OpenLDAP configuration file was given.'
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addOpenLDAP6: %s' % err)
+
+	try:
+		options = form['slapd_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('slapd_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('slapd_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addOpenLDAP7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', shutdown_wait)
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', 0)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addOpenLDAP8: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, model, None]
+
+def addPostgres8(request, form=None):
+	errors = list()
+
+	if form is None:
+		form = request.form
+
+	if not form:
+		luci_log.debug_verbose('addPostgreSQL80: form is missing')
+		return None
+
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addPostgreSQL81: model is missing')
+		return None
+
+	res = None
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise Exception, 'oldname is blank.'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e:
+				errors.append('No PostgreSQL 8 resource named \"%s\" exists.' % oldname)
+		except Exception, e:
+			errors.append('No original name was found for this PostgreSQL 8 resource.')
+			luci_log.debug_verbose('addPostgreSQL82: %s' % str(e))
+	else:
+		try:
+			res = Postgres8()
+			if not res:
+				raise Exception, 'could not create PostgreSQL 8 object'
+		except Exception, e:
+			errors.append('An error occurred while creating a PostgreSQL 8 resource.')
+			luci_log.debug_verbose('addPostgreSQL83: %s' % str(e))
+
+	if not res:
+		return [None, None, errors]
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise Exception, 'No name was given for this PostgreSQL 8 resource.'
+		res.addAttribute('name', name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addPostgreSQL84: %s' % err)
+
+	try:
+		user = form['postmaster_user'].strip()
+		if not user:
+			raise KeyError, 'No postmaster user was given for this PostgreSQL 8 resource.'
+		res.addAttribute('postmaster_user', user)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addPostgreSQL85: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the PostgreSQL 8 configuration file was given.'
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addPostgreSQL86: %s' % err)
+
+	try:
+		options = form['postmaster_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('postmaster_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('postmaster_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addPostgreSQL87: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', shutdown_wait)
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', 0)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addPostgreSQL88: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, model, None]
+
+def addTomcat5(request, form=None):
+	errors = list()
+
+	if form is None:
+		form = request.form
+
+	if not form:
+		luci_log.debug_verbose('addTomcat50: form is missing')
+		return None
+
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addTomcat51: model is missing')
+		return None
+
+	res = None
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise Exception, 'oldname is blank.'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e:
+				errors.append('No Tomcat 5 resource named \"%s\" exists.' % oldname)
+		except Exception, e:
+			errors.append('No original name was found for this Tomcat 5 resource.')
+			luci_log.debug_verbose('addTomcat52: %s' % str(e))
+	else:
+		try:
+			res = Tomcat5()
+			if not res:
+				raise Exception, 'could not create Tomcat5 object'
+		except Exception, e:
+			errors.append('An error occurred while creating a Tomcat 5 resource.')
+			luci_log.debug_verbose('addTomcat53: %s' % str(e))
+
+	if not res:
+		return [None, None, errors]
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise Exception, 'No name was given for this Tomcat 5 resource.'
+		res.addAttribute('name', name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addTomcat54: %s' % err)
+
+	try:
+		user = form['tomcat_user'].strip()
+		if not user:
+			raise KeyError, 'No user was given for this Tomcat 5 resource.'
+		res.addAttribute('tomcat_user', user)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addTomcat55: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the Tomcat 5 configuration file was given.'
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addTomcat56: %s' % err)
+
+	try:
+		options = form['catalina_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('catalina_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('catalina_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addTomcat57: %s' % err)
+
+	try:
+		catalina_base = form['catalina_base'].strip()
+		if not catalina_base:
+			raise KeyError, 'No catalina base directory was given for this Tomcat 5 resource.'
+		res.addAttribute('catalina_base', catalina_base)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addTomcat58: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', shutdown_wait)
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', 0)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addTomcat59: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, model, None]
+
+def addVM(request, form=None):
+	errors = list()
+
+	if form is None:
+		form = request.form
+
+	if not form:
+		luci_log.debug_verbose('addVM0: form is missing')
+		return None
+
+	model = request.SESSION.get('model')
+	if not model:
+		luci_log.debug_verbose('addVM1: model is missing')
+		return None
+
+	res = None
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise Exception, 'oldname is blank.'
+			try:
+				res = getResourceForEdit(model, oldname)
+			except KeyError, e:
+				errors.append('No VM resource named \"%s\" exists.' % oldname)
+		except Exception, e:
+			errors.append('No original name was found for this VM resource.')
+			luci_log.debug_verbose('addVM2: %s' % str(e))
+	else:
+		try:
+			res = Vm()
+			if not res:
+				raise Exception, 'could not create VM object'
+		except Exception, e:
+			errors.append('An error occurred while creating a VM resource.')
+			luci_log.debug_verbose('addVM3: %s' % str(e))
+
+	if not res:
+		return [None, None, errors]
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise Exception, 'No name was given for this VM resource.'
+		res.addAttribute('name', name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM4: %s' % err)
+
+	try:
+		domain = form['domain'].strip()
+		if not domain:
+			raise KeyError, 'No domain was given for this VM resource.'
+		res.addAttribute('domain', domain)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM5: %s' % err)
+
+	try:
+		bootloader = form['bootloader'].strip()
+		if not bootloader:
+			raise KeyError, 'No bootloader was given for this VM resource.'
+		res.addAttribute('bootloader', bootloader)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM6: %s' % err)
+
+	try:
+		path = form['path'].strip()
+		if not path:
+			raise KeyError, 'No path specification was given for this VM resource.'
+		res.addAttribute('path', path)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM7: %s' % err)
+
+	try:
+		disk = form['rootdisk_physical'].strip()
+		if not disk:
+			raise KeyError, 'No physical root disk was given for this VM resource.'
+		res.addAttribute('rootdisk_physical', disk)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM8: %s' % err)
+
+	try:
+		disk = form['rootdisk_virtual'].strip()
+		if not disk:
+			raise KeyError, 'No virtual root disk was given for this VM resource.'
+		res.addAttribute('rootdisk_virtual', disk)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM9: %s' % err)
+
+	try:
+		disk = form['swapdisk_physical'].strip()
+		if not disk:
+			raise KeyError, 'No physical swap disk was given for this VM resource.'
+		res.addAttribute('swapdisk_physical', disk)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM10: %s' % err)
+
+	try:
+		disk = form['swapdisk_virtual'].strip()
+		if not disk:
+			raise KeyError, 'No virtual swap disk was given for this VM resource.'
+		res.addAttribute('swapdisk_virtual', disk)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM11: %s' % err)
+
+	try:
+		vif = form['vif'].strip()
+		if not vif:
+			raise KeyError, 'No virtual interface MAC address was given for this VM resource.'
+		res.addAttribute('vif', vif)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM12: %s' % err)
+
+	try:
+		memsize = int(form['memory'].strip())
+		res.addAttribute('memory', memsize)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM13: %s' % err)
+
+	try:
+		recovery = form['recovery'].lower().strip()
+		if not recovery:
+			raise Exception, 'No recovery method was given for this VM resource.'
+		if recovery != 'restart' and recovery != 'relocate' and recovery != 'disable':
+			raise Exception, 'An invalid recovery type, \"%s\", was given for this VM resource.' % recovery
+		res.addAttribute('recovery', recovery)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		luci_log.debug_verbose('addVM14: %s' % err)
+
+	if len(errors) > 1:
+		return [None, None, errors]
+	return [res, model, None]
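
addVM() repeats the same required-field block for nine fields. The same work could be table-driven; a sketch with an abridged field list (field names as in the form above; the helper itself is not part of the patch):

    VM_REQUIRED_FIELDS = (
        ('resourceName', 'name', 'No name was given for this VM resource.'),
        ('domain', 'domain', 'No domain was given for this VM resource.'),
        ('path', 'path', 'No path specification was given for this VM resource.'),
    )

    def add_required_fields(res, form, errors):
        # Copy each required form field into a resource attribute,
        # collecting one message per missing or blank field.
        for form_key, attr, msg in VM_REQUIRED_FIELDS:
            try:
                value = form[form_key].strip()
                if not value:
                    raise Exception, msg
                res.addAttribute(attr, value)
            except Exception, e:
                errors.append(str(e))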
+
+resourceAddHandler = {
+	'ip': addIp,
+	'fs': addFs,
+	'gfs': addGfs,
+	'nfsm': addNfsm,
+	'nfsx': addNfsx,
+	'nfsc': addNfsc,
+	'scr': addScr,
+	'smb': addSmb,
+	'vm': addVM,
+	'tomcat-5': addTomcat5,
+	'postgres-8': addPostgres8,
+	'apache': addApache,
+	'openldap': addOpenLDAP,
+	'mysql': addMySQL
+}
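
resourceAddHandler maps the form's resource-type token to the matching handler, so a single dictionary lookup replaces a chain of if/elif tests. A hypothetical dispatch site (the 'type' form key is an assumption for illustration):

    def dispatch_add(request):
        # Unknown or missing resource types get no handler.
        try:
            res_type = request.form['type'].strip()
        except KeyError:
            return None
        handler = resourceAddHandler.get(res_type)
        if handler is None:
            return None
        return handler(request)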
+
+def resolveClusterChanges(self, clusterName, model):
+	try:
+		mb_nodes = model.getNodes()
+		if not mb_nodes or not len(mb_nodes):
+			raise Exception, 'node list is empty'
+	except Exception, e:
+		luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %s' \
+				% (str(e), clusterName))
+		return 'Unable to find cluster nodes for %s' % clusterName
+
+	try:
+		cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		if not cluster_node:
+			raise Exception, 'cluster node is none'
+	except Exception, e:
+		luci_log.debug('RCC1: cant find cluster node for %s: %s'
+			% (clusterName, str(e)))
+		return 'Unable to find an entry for %s in the Luci database.' % clusterName
+
+	try:
+		db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
+		if not db_nodes or not len(db_nodes):
+			raise Exception, 'no database nodes'
+	except Exception, e:
+		# Should we just create them all? Can this even happen?
+		luci_log.debug('RCC2: error: %s' % str(e))
+		return 'Unable to find database entries for any nodes in %s' % clusterName
+
+	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
+
+	# this is a really great algorithm.
+	missing_list = list()
+	new_list = list()
+	for i in mb_nodes:
+		for j in db_nodes:
+			f = 0
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			new_list.append(i)
+
+	for i in db_nodes:
+		for j in mb_nodes:
+			f = 0
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			missing_list.append(i)
+
+	messages = list()
+	for i in missing_list:
+		try:
+			## or alternately
+			##new_node = cluster_node.restrictedTraverse(i)
+			##setNodeFlag(self, new_node, CLUSTER_NODE_NOT_MEMBER)
+			cluster_node.delObjects([i])
+			messages.append('Node \"%s\" is no longer a member of cluster \"%s.\" It has been deleted from the management interface for this cluster.' % (i, clusterName))
+			luci_log.debug_verbose('VCC3: deleted node %s' % i)
+		except Exception, e:
+			luci_log.debug_verbose('VCC4: delObjects: %s: %s' % (i, str(e)))
+
+	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
+	for i in new_list:
+		try:
+			cluster_node.manage_addFolder(i, '__luci__:csystem:' + clusterName)
+			new_node = cluster_node.restrictedTraverse(i)
+			setNodeFlag(self, new_node, new_flags)
+			messages.append('A new cluster node, \"%s,\" is now a member of cluster \"%s.\" It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.' % (i, clusterName))
+		except Exception, e:
+			messages.append('A new cluster node, \"%s,\" is now a member of cluster \"%s\", but it has not been added to the management interface for this cluster as a result of an error creating a database entry for it.' % (i, clusterName))
+			luci_log.debug_verbose('VCC5: addFolder: %s/%s: %s' \
+				% (clusterName, i, str(e)))
+
+	return messages
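
The two nested loops above compute two set differences under the fuzzy same_host() equality, which treats 'node1' and 'node1.example.com' as the same host. For non-empty node lists the same result can be expressed directly (a sketch, not a drop-in replacement):

    def fuzzy_difference(left, right):
        # Elements of left with no same_host() match anywhere in right.
        return [i for i in left
                if not [j for j in right if same_host(i, j)]]

    new_list = fuzzy_difference(mb_nodes, db_nodes)
    missing_list = fuzzy_difference(db_nodes, mb_nodes)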
+
+def addResource(self, request, model, res, res_type):
+	clustername = model.getClusterName()
+	if not clustername:
+		luci_log.debug_verbose('addResource0: no cluname from mb')
+		return 'Unable to determine cluster name'
+
+	rc = getRicciAgent(self, clustername)
+	if not rc:
+		luci_log.debug_verbose('addResource1: unable to find a ricci agent for cluster %s' % clustername)
+		return 'Unable to find a ricci agent for the %s cluster' % clustername
+
+	try:
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		luci_log.debug_verbose('addResource2: adding the new resource failed: %s' % str(e))
+		return 'Unable to add the new resource'
+
+	try:
+		cp = model.getClusterPtr()
+		cp.incrementConfigVersion()
+		model.setModified(True)
+		conf = model.exportModelAsString()
+		if not conf:
+			raise Exception, 'model string for %s is blank' % clustername
+	except Exception, e:
+		luci_log.debug_verbose('addResource3: exportModelAsString : %s' \
+			% str(e))
+		return 'An error occurred while adding this resource'
+
+	try:
+		ragent = rc.hostname()
 		if not ragent:
 			luci_log.debug_verbose('addResource4: missing ricci hostname')
 			raise Exception, 'unknown ricci agent hostname'
@@ -6049,7 +6820,7 @@
 		request.SESSION.set('model', model)
 	except:
 		luci_log.debug_verbose('Appending model to request failed')
-		return 'An error occurred while storing the cluster model.' 
+		return 'An error occurred while storing the cluster model.'
 
 def resolve_nodename(self, clustername, nodename):
 	path = str(CLUSTER_FOLDER_PATH + clustername)
@@ -6206,7 +6977,7 @@
 			msg += 'Fix the error and try again:\n'
 		else:
 			msg += 'PASSED\n'
-			
+
 			msg += 'Making sure no clustername change has occurred - '
 			new_name = cc_xml.firstChild.getAttribute('name')
 			if new_name != clustername:
@@ -6214,13 +6985,13 @@
 				msg += 'Fix the error and try again:\n'
 			else:
 				msg += 'PASSED\n'
-				
+
 				msg += 'Increasing cluster version number - '
 				version = cc_xml.firstChild.getAttribute('config_version')
 				version = int(version) + 1
 				cc_xml.firstChild.setAttribute('config_version', str(version))
 				msg += 'DONE\n'
-				
+
 				msg += 'Propagating new cluster.conf'
 				rc = getRicciAgent(self, clustername)
 				if not rc:
@@ -6231,7 +7002,7 @@
 					if batch_id is None or result is None:
 						luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
 						msg += '\nUnable to propagate the new cluster configuration for ' + clustername + '\n\n'
-					else:	
+					else:
 						msg += ' - DONE\n'
 						cc = cc_xml.toxml()
 						msg += '\n\nALL DONE\n\n'



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-17 22:26 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-17 22:26 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-01-17 22:26:28

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix related to bz212021 found during QE testing

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.19&r2=1.120.2.20

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/15 18:21:50	1.120.2.19
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/17 22:26:27	1.120.2.20
@@ -1600,7 +1600,13 @@
 
 		if 'existing_device' in fence_form:
 			del fence_form['existing_device']
-			fencedev_name = fence_form['name']
+
+			try:
+				fencedev_name = fence_form['name']
+				if not fencedev_name.strip():
+					raise Exception, 'no fence name'
+			except Exception, e:
+				return (False, {'errors': [ 'You must provide a unique name for all fence devices.' ]})
 
 			if fence_type is None:
 				# An unknown device. Pull the data out of
@@ -1654,7 +1660,13 @@
 					instance_list.append({'name': fencedev_name })
 		else:
 			# The user created a new fence device.
-			fencedev_name = fence_form['name']
+			try:
+				fencedev_name = fence_form['name']
+				if not fencedev_name.strip():
+					raise Exception, 'no fence name'
+			except Exception, e:
+				return (False, {'errors': [ 'You must provide a unique name for all fence devices.' ]})
+
 			fencedev_obj = FenceDevice()
 			for k in fence_form.keys():
 				if fence_form[k]:
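
Both hunks add the same guard: a fence device name must be present and non-blank before it is used as an identifier. The check, isolated as a standalone sketch (the function is illustrative; the patch returns its error tuple inline):

    def require_fence_name(fence_form):
        # Return the name, or None when it is missing or whitespace.
        try:
            name = fence_form['name']
            if not name.strip():
                raise Exception, 'no fence name'
            return name
        except Exception:
            return None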



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-17 22:14 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-17 22:14 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-17 22:14:02

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	force users to provide a fence name for new devices created in the node context

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.203&r2=1.204

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/11 22:49:42	1.203
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/17 22:14:02	1.204
@@ -1600,7 +1600,13 @@
 
 		if 'existing_device' in fence_form:
 			del fence_form['existing_device']
-			fencedev_name = fence_form['name']
+
+			try:
+				fencedev_name = fence_form['name']
+				if not fencedev_name.strip():
+					raise Exception, 'no fence name'
+			except Exception, e:
+				return (False, {'errors': [ 'You must provide a unique name for all fence devices.' ]})
 
 			if fence_type is None:
 				# An unknown device. Pull the data out of
@@ -1654,7 +1660,13 @@
 					instance_list.append({'name': fencedev_name })
 		else:
 			# The user created a new fence device.
-			fencedev_name = fence_form['name']
+			try:
+				fencedev_name = fence_form['name']
+				if not fencedev_name.strip():
+					raise Exception, 'no fence name'
+			except Exception, e:
+				return (False, {'errors': [ 'You must provide a unique name for all fence devices.' ]})
+
 			fencedev_obj = FenceDevice()
 			for k in fence_form.keys():
 				if fence_form[k]:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-10 23:33 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2007-01-10 23:33 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2007-01-10 23:33:29

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix fencedev marker for bz212021

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.199&r2=1.200

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/10 22:45:35	1.199
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/10 23:33:27	1.200
@@ -4387,7 +4387,7 @@
           last_kid_fd = None
           level1.append(fencedev)
         else:  #This dev is shared
-          if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd.getName().strip()):  #just append a new instance struct to last_kid_fd
+          if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):  #just append a new instance struct to last_kid_fd
             instance_struct = {}
             instance_struct['id'] = str(minor_num)
             minor_num = minor_num + 1
@@ -4400,7 +4400,7 @@
             #Now just add this struct to last_kid_fd and reset last_kid_fd
             ilist = last_kid_fd['instance_list']
             ilist.append(instance_struct)
-            last_kid_fd = fd
+            #last_kid_fd = fd
             continue
           else: #Shared, but not used above...so we need a new fencedev struct
             fencedev = {}
@@ -4428,7 +4428,7 @@
               instance_struct[kee] = kidattrs[kee]
             inlist.append(instance_struct) 
             level1.append(fencedev)
-            last_kid_fd = fd
+            last_kid_fd = fencedev
             continue
     map['level1'] = level1
 
@@ -4494,7 +4494,7 @@
           last_kid_fd = None
           level2.append(fencedev)
         else:  #This dev is shared
-          if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd.getName().strip()):  #just append a new instance struct to last_kid_fd
+          if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):  #just append a new instance struct to last_kid_fd
             instance_struct = {}
             instance_struct['id'] = str(minor_num)
             minor_num = minor_num + 1
@@ -4507,7 +4507,7 @@
             #Now just add this struct to last_kid_fd and reset last_kid_fd
             ilist = last_kid_fd['instance_list']
             ilist.append(instance_struct)
-            last_kid_fd = fd
+            #last_kid_fd = fd
             continue
           else: #Shared, but not used above...so we need a new fencedev struct
             fencedev = {}
@@ -4535,7 +4535,7 @@
               instance_struct[kee] = kidattrs[kee]
             inlist.append(instance_struct) 
             level2.append(fencedev)
-            last_kid_fd = fd
+            last_kid_fd = fencedev
             continue
     map['level2'] = level2
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-10 22:45 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-10 22:45 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-10 22:45:35

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	sort the list of fence devices to preserve the intended ordering

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.198&r2=1.199

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/10 21:40:05	1.198
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/10 22:45:35	1.199
@@ -1503,7 +1503,7 @@
 			raise Exception, 'No method ID'
 		fence_method = Method()
 		fence_method.addAttribute('name', str(method_id))
-		node.children[0].children[fence_level_num - 1] = fence_method
+		levels[fence_level_num - 1] = fence_method
 	except Exception, e:
 		method_id = fence_level
 		fence_method = Method()
@@ -1571,7 +1571,9 @@
 		else:
 			form_hash[form_id] = (dummy_form, list())
 
-	for i in form_hash:
+	fh_keys = form_hash.keys()
+	fh_keys.sort()
+	for i in fh_keys:
 		fencedev_name = None
 		fencedev_unknown = False
 		fencedev_obj = None
@@ -1650,7 +1652,6 @@
 					# Add back the tags under the method block
 					# for the fence instance
 					instance_list.append({'name': fencedev_name })
-
 		else:
 			# The user created a new fence device.
 			fencedev_name = fence_form['name']
@@ -1739,7 +1740,6 @@
 			% str(e))
 		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration.' ]})
 
-
 	rc = getRicciAgent(self, clustername)
 	if not rc:
 		luci_log.debug_verbose('vNFC18: unable to find a ricci agent for cluster %s' % clustername)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2007-01-10 20:06 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2007-01-10 20:06 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-01-10 20:06:26

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	code to propagate node fence config changes

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.196&r2=1.197

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/10 16:00:42	1.196
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/01/10 20:06:26	1.197
@@ -23,6 +23,7 @@
 from QuorumD import QuorumD
 from Heuristic import Heuristic
 from clusterOS import resolveOSType
+from Fence import Fence
 from Method import Method
 from FenceDevice import FenceDevice
 from Device import Device
@@ -1448,7 +1449,7 @@
 	try:
 		fence_level = int(request.form['fence_level'].strip())
 	except Exception, e:
-		luci_log.debug_verbose('vNFC0a: no fence level: %s' % str(e))
+		luci_log.debug_verbose('vNFC1: no fence level: %s' % str(e))
 		return (False, {'errors': ['No fence level was supplied.']})
 
 	try:
@@ -1456,7 +1457,7 @@
 		if not nodename:
 			raise Exception, 'nodename is blank'
 	except Exception, e:
-		luci_log.debug_verbose('vNFC1: no nodename: %s' % str(e))
+		luci_log.debug_verbose('vNFC2: no nodename: %s' % str(e))
 		return (False, {'errors': ['No node name was given.']})
 
 	try:
@@ -1464,7 +1465,7 @@
 		if not clustername:
 			raise Exception, 'clustername is blank'
 	except Exception, e:
-		luci_log.debug_verbose('vNFC2: no clustername: %s' % str(e))
+		luci_log.debug_verbose('vNFC3: no clustername: %s' % str(e))
 		return (False, {'errors': ['No cluster name was given.']})
 
 	try:
@@ -1479,7 +1480,7 @@
 			model = None
 
 	if model is None:
-		luci_log.debug_verbose('vNFC2a: unable to get model for cluster %s' % clustername)
+		luci_log.debug_verbose('vNFC4: unable to get model for cluster %s' % clustername)
 		return (False, {'errors': ['No cluster model was found.']})
 
 	try:
@@ -1488,15 +1489,30 @@
 		if len(forms) < 1:
 			raise
 	except Exception, e:
-		luci_log.debug_verbose('vNFC2b: error: %s' % str(e))
+		luci_log.debug_verbose('vNFC5: error: %s' % str(e))
 		return (False, {'errors': ['The fence data submitted is not properly formed.']})
 
+	try:
+		node = model.retrieveNodeByName(nodename)
+	except GeneralError, e:
+		luci_log.debug_verbose('vNFC6: unable to find node name %s in current node list' % (str(nodename), str(e)))
+		return (False, {'errors': ['Unable to find the cluster node %s in the node list.' % str(nodename) ]})
+
+	fence_level_num = int(fence_level)
+	levels = node.getFenceLevels()
+	try:
+		method_id = levels[fence_level_num - 1].getAttribute('name')
+		if not method_id:
+			raise Exception, 'No method ID'
+	except Exception, e:
+		method_id = fence_level
+	
 	fence_method = Method()
-	fence_method.addAttribute('name', str(fence_level))
+	fence_method.addAttribute('name', str(method_id))
 
 	form_hash = {}
 	for i in forms:
-		form_id = i.getAttribute('name')
+		form_id = i.getAttribute('id')
 		if not form_id:
 			continue
 		ielems = i.getElementsByTagName('input')
@@ -1509,7 +1525,7 @@
 			try:
 				input_type = str(i.getAttribute('type'))
 			except Exception, e:
-				luci_log.debug_verbose('vNFC4: input type: %s' % str(e))
+				luci_log.debug_verbose('vNFC7: input type: %s' % str(e))
 				continue
 
 			if not input_type or input_type == 'button':
@@ -1518,19 +1534,24 @@
 			try:
 				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
 			except Exception, e:
-				luci_log.debug_verbose('vNFC5: parsing XML: %s' % str(e))
+				luci_log.debug_verbose('vNFC8: parsing XML: %s' % str(e))
 
 		if len(dummy_form) < 1:
 			continue
 
-		if 'instance' in dummy_form:
+		if 'fence_instance' in dummy_form:
 			try:
 				parent = dummy_form['parent_fencedev']
-				parent_form = form_hash[parent][1].append(dummy_form)
 			except:
-				luci_log.debug_verbose('vNFC6: no parent for instance: %s' \
-					% dummy_form['name'])
-				raise Exception, 'instance has no owner'
+				luci_log.debug_verbose('vNFC9: no parent for instance')
+				return (False, {'errors': [ 'Unable to determine what device the current instance uses.' ]})
+
+			try:
+				parent_form = form_hash[parent][1].append(dummy_form)
+				del dummy_form['fence_instance']
+			except Exception, e:
+				luci_log.debug_verbose('vNFC10: no parent for instance')
+				return (False, {'errors': [ 'Unable to determine what device the current instance uses.' ]})
 		else:
 			form_hash[form_id] = (dummy_form, list())
 
@@ -1542,7 +1563,7 @@
 		try:
 			fence_form, instance_list = form_hash[i]
 		except Exception, e:
-			luci_log.debug_verbose('vNFC7: %s' % str(e))
+			luci_log.debug_verbose('vNFC11: %s' % str(e))
 			continue
 
 		try:
@@ -1551,7 +1572,7 @@
 				raise Exception, 'fence type is blank'
 			fence_form['agent'] = fence_type
 		except Exception, e:
-			luci_log.debug_verbose('vNFC8: %s' % str(e))
+			luci_log.debug_verbose('vNFC12: %s %s' % (i, str(e)))
 			fence_type = None
 
 		try:
@@ -1559,9 +1580,9 @@
 		except:
 			pass
 
-		fencedev_name = fence_form['name']
 		if 'existing_device' in fence_form:
 			del fence_form['existing_device']
+			fencedev_name = fence_form['name']
 
 			if fence_type is None:
 				# An unknown device. Pull the data out of
@@ -1584,18 +1605,36 @@
 							raise Exception, 'old name is blank'
 						del fence_form['old_name']
 					except Exception, e:
-						luci_log.debug_verbose('vNFC8: no old name for %s %s' \
+						luci_log.debug_verbose('vNFC12: no old name for %s %s' \
 							% (fence_form['name'], str(e)))
+						return (False, {'errors': [ 'Unable to determine the original name for the device now named %s' % fencedev_name ]})
+
+					fence_dev_list = model.getFenceDevices()
+					fencedev_obj = None
+					for fd in fence_dev_list:
+						if fd.getAttribute('name') == 'old_name':
+							fencedev_obj = fd
+							try:
+								model.fencedevices_ptr.removeChild(fd)
+							except Exception, e:
+								luci_log.debug_verbose('VNFC8a: %s: %s' \
+									% (old_name, str(e)))
+								return (False, {'errors': [ 'Unable to remove old fence device %s' % old_name ]})
+							break
+					if fencedev_obj is None:
+						luci_log.debug_verbose('vNFC14: no fence device named %s was found' % old_name)
+						return (False, {'errors': ['No fence device named %s was found' % old_name ] })
 
-					fencedev_obj = FenceDevice()
 					for k in fence_form.keys():
-						fencedev_obj.addAttribute(k, fence_form[k])
+						if fence_form[k]:
+							fencedev_obj.addAttribute(k, str(fence_form[k]))
 		else:
 			# The user created a new fence device.
 			fencedev_name = fence_form['name']
 			fencedev_obj = FenceDevice()
 			for k in fence_form.keys():
-				fencedev_obj.addAttribute(k, fence_form[k])
+				if fence_form[k]:
+					fencedev_obj.addAttribute(k, str(fence_form[k]))
 
 		if fencedev_obj is not None:
 			# If a device with this name exists in the model
@@ -1603,31 +1642,81 @@
 			# this block is not executed, we don't need to make
 			# any changes to the fencedevices block for this
 			# device
-			pass
+			fence_dev_list = model.getFenceDevices()
+			for fd in fence_dev_list:
+				if fencedev_name == fd.getAttribute('name'):
+					luci_log.debug_verbose('vNFC15: fence ident %s already in use' % fencedev_name)
+					return (False, {'errors': ['There is already a fence device named %s' % fencedev_name ] })
+			model.fencedevices_ptr.addChild(fencedev_obj)
 
 		if fencedev_unknown is True:
 			# Save any instances for this fence device.
 			pass
 
 		for inst in instance_list:
-			new_instance = not 'existing_instance' in inst
 			try:
-				del inst['new_instance']
 				del inst['parent_fencedev']
-				del inst['existing_instance']
+			except:
+				pass
+			try:
+				del inst['new_instance']
+			except:
+				pass
+			try:
 				del inst['name']
 			except:
 				pass
+			try:
+				del inst['existing_instance']
+			except:
+				pass
 
 			device_obj = Device()
 			device_obj.setAgentType(fence_type)
 			device_obj.addAttribute('name', fencedev_name)
-			for k in new_instance.keys():
-				device_obj.addAttribute(k, inst[k])
+			for k in inst.keys():
+				if inst[k]:
+					device_obj.addAttribute(k, str(inst[k]))
 			fence_method.addChild(device_obj)
 
-	luci_log.debug_verbose('vNFC7: got xml: %s' % doc.toprettyxml())
-	return (False, { 'errors': [ doc.toprettyxml() ]})
+		try:
+			levels[fence_level_num - 1] = fence_method
+		except:
+			fence_node = Fence()
+			fence_node.addChild(fence_method)
+			node.addChild(fence_node)
+
+	try:
+		cp = model.getClusterPtr()
+		cp.incrementConfigVersion()
+		model.setModified(True)
+		conf = str(model.exportModelAsString())
+		if not conf:
+			raise Exception, 'model string is blank'
+		luci_log.debug_verbose('vNFC16: exported \"%s\"' % conf)
+	except Exception, e:
+		luci_log.debug_verbose('vNFC17: exportModelAsString failed: %s' \
+			% str(e))
+		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration.' ]})
+
+	rc = getRicciAgent(self, clustername)
+	if not rc:
+		luci_log.debug_verbose('vNFC18: unable to find a ricci agent for cluster %s' % clustername)
+		return (False, {'errors': ['Unable to find a ricci agent for the %s cluster' % clustername ]})
+	ragent = rc.hostname()
+
+	batch_number, result = setClusterConf(rc, conf)
+	if batch_number is None or result is None:
+		luci_log.debug_verbose('vNFC19: missing batch and/or result')
+		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration.' ]})
+
+	try:
+		set_node_flag(self, clustername, ragent, str(batch_number), FENCEDEV_NODE_CONFIG, "Updating fence configuration for node \'%s\'" % nodename)
+	except Exception, e:
+		luci_log.debug_verbose('vNFC20: failed to set flags: %s' % str(e))
+
+	response = request.RESPONSE
+	response.redirect(request['URL'] + "?pagetype=" + NODE + "&clustername=" + clustername + '&nodename=' + nodename + '&busyfirst=true')
 
 def deleteFenceDevice(self, request):
   errors = list()
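
This is the patch that turns validateNodeFenceConfig from a debugging stub (the old version simply returned doc.toprettyxml() as an error) into the full write path, and it establishes the propagation sequence the later entries reuse: mutate the model, bump the config version, export, push through a ricci agent, flag the node busy, redirect. One flaw is visible in the hunk itself: the vNFC6 log call formats a one-%s string with a two-element tuple, so that error path would raise TypeError instead of logging. A condensed sketch of the propagation tail, using the helper names from the patch with error handling trimmed:

    def push_new_conf(self, model, clustername):
        cp = model.getClusterPtr()
        cp.incrementConfigVersion()  # members only accept a conf whose version grew
        model.setModified(True)
        conf = str(model.exportModelAsString())
        rc = getRicciAgent(self, clustername)
        if not conf or not rc:
            return None
        batch_number, result = setClusterConf(rc, conf)
        return batch_number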



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-12-20 20:40 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-12-20 20:40 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-12-20 20:40:31

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	forgot a }

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.188&r2=1.189

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/20 20:24:27	1.188
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/20 20:40:31	1.189
@@ -1334,7 +1334,7 @@
     fencedev_name = form['orig_name']
     fencedev_name = fencedev_name.strip()
   except KeyError, e:
-    return (False, {'errors':['No device name in form submission'])
+    return (False, {'errors':['No device name in form submission']})
 
   fdev_to_delete = None:
   #iterate thru list of current fencedevs and find one to be deleted
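
The one-character fix restores the bracket balance in the return statement. Note that the surrounding context still shows `fdev_to_delete = None:` with a stray trailing colon, also a syntax error, so this revision as quoted would not byte-compile; running `python -m py_compile cluster_adapters.py` before committing catches both kinds of mistake.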



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-12-14 21:37 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-12-14 21:37 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-12-14 21:37:15

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 

Log message:
	more fixes for delete cluster

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.182&r2=1.183
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.40&r2=1.41

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/14 18:22:53	1.182
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/14 21:37:15	1.183
@@ -1850,8 +1850,16 @@
 			luci_log.debug('GRA1: no cluster nodes for %s found.' % clustername)
 			raise Exception, 'no cluster nodes were found at %s' % path
 	except Exception, e:
-		luci_log.debug('GRA2: cluster folder %s for %s is missing: %s.' \
-			% (path, clustername, str(e)))
+		try:
+			luci_log.debug('GRA2: cluster folder %s for %s is missing: %s.' \
+				% (path, clustername, str(e)))
+
+			if len(clusterfolder.objectItems()) < 1:
+				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+				clusters.manage_delObjects([clustername])
+		except Exception, e:
+			luci_log.debug_verbose('GRA3: %s' % str(e))
+
 		return None
 
 	cluname = lower(clustername)
@@ -2040,7 +2048,7 @@
 		luci_log.debug_verbose('GCS0: error: %s' % str(e))
 		doc = None
 
-	if doc is None:
+	if doc is None and not cluname:
 		try:
 			model = request.SESSION.get('model')
 			cinfo = getClusterStatusModel(model)
@@ -2832,7 +2840,7 @@
 			continue
 
 		if delete is True:
-			if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
+			if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=True) is None:
 				luci_log.debug_verbose('CStop1: nodeDelete failed')
 				errors += 1
 		else:
@@ -2852,7 +2860,8 @@
 	return snum_err + jnum_err
 
 def clusterDelete(self, model):
-	if clusterStop(self, model, delete=True) < 1:
+	num_errors = clusterStop(self, model, delete=True)
+	if num_errors < 1:
 		try:
 			clustername = model.getClusterName()
 		except Exception, e:
@@ -2862,8 +2871,20 @@
 		try:
 			delCluster(self, clustername)
 		except Exception, e:
-			luci_log.debug_verbose('clusterDelete0: %s: %s' \
+			luci_log.debug_verbose('clusterDelete1: %s: %s' \
+				% (clustername, str(e)))
+
+		try:
+			clusterfolder = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH + clustername))
+			if len(clusterfolder.objectItems()) < 1:
+				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+				clusters.manage_delObjects([clustername])
+		except Exception, e:
+			luci_log.debug_verbose('clusterDelete2: %s %s' \
 				% (clustername, str(e)))
+	else:
+		luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
+			% (clustername, num_errors))
 
 def forceNodeReboot(self, rc, clustername, nodename_resolved):
 	batch_number, result = nodeReboot(rc)
@@ -2943,21 +2964,14 @@
 		luci_log.debug_verbose('FNF4: failed to set flags: %s' % str(e))
 	return True
 
-def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved):
+def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
 	# We need to get a node name other than the node
 	# to be deleted, then delete the node from the cluster.conf
 	# and propogate it. We will need two ricci agents for this task,
-	# unless we are deleting the last remaining cluster node.
+	# unless we are deleting the cluster itself.
 
-	if len(model.getNodes()) == 1:
-		# If this is the last cluster node, we don't need a second
-		# node to propagate a new cluster.conf file. We need only to
-		# stop the final node and delete its cluster.conf file.
-		last_node = True
-	else:
+	if not delete_cluster:
 		# Make sure we can find a second node before we hose anything.
-
-		last_node = False
 		found_one = False
 
 		path = str(CLUSTER_FOLDER_PATH + clustername)
@@ -3024,32 +3038,40 @@
 		luci_log.debug_verbose('ND5: batch_number and/or result is None')
 		return None
 
-	# It is not worth flagging this node in DB, as we are going
-	# to delete it anyway. Now, we need to delete node from model
-	# and send out new cluster.conf
-	delete_target = None
-	nodelist = model.getNodes()
-	find_node = lower(nodename)
-	for n in nodelist:
+	# Unless we're deleting the whole cluster, it is not worth
+	# flagging this node in DB, as we are going to delete it
+	# anyway. Now, we need to delete node from model and send out
+	# new cluster.conf
+
+	if delete_cluster:
 		try:
-			if lower(n.getName()) == find_node:
-				delete_target = n
-				break
-		except:
-			continue
+			set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_DELETE, "Deleting cluster \"%s\": Deleting node \'%s\'" \
+				% (clustername, nodename_resolved))
+		except Exception, e:
+			luci_log.debug_verbose('ND5a: failed to set flags: %s' % str(e))
+	else:
+		delete_target = None
+		nodelist = model.getNodes()
+		find_node = lower(nodename)
+		for n in nodelist:
+			try:
+				if lower(n.getName()) == find_node:
+					delete_target = n
+					break
+			except:
+				continue
 
-	if delete_target is None:
-		luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
-			% (nodename, clustername))
-		return None
+		if delete_target is None:
+			luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
+				% (nodename, clustername))
+			return None
 
-	try:
-		model.deleteNode(delete_target)
-	except Exception, e:
-		luci_log.debug_verbose('ND6a: deleteNode %s failed: %s' \
-			% (delete_target.getName(), str(e)))
+		try:
+			model.deleteNode(delete_target)
+		except Exception, e:
+			luci_log.debug_verbose('ND6a: deleteNode %s failed: %s' \
+				% (delete_target.getName(), str(e)))
 
-	if not last_node:
 		try:
 			str_buf = model.exportModelAsString()
 			if not str_buf:
@@ -3066,22 +3088,20 @@
 
 	# Now we need to delete the node from the DB
 	path = str(CLUSTER_FOLDER_PATH + clustername)
-	del_path = str(path + '/' + nodename_resolved)
-
 	try:
-		delnode = self.restrictedTraverse(del_path)
 		clusterfolder = self.restrictedTraverse(path)
-		clusterfolder.manage_delObjects(delnode[0])
+		clusterfolder.manage_delObjects([nodename_resolved])
 	except Exception, e:
-		luci_log.debug_verbose('ND9: error deleting %s: %s' \
-			% (del_path, str(e)))
+		luci_log.debug_verbose('ND9: error deleting %s@%s: %s' \
+			% (nodename_resolved, path, str(e)))
 
-	if not last_node:
-		try:
-			set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
-		except Exception, e:
-			luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
+	if delete_cluster:
+		return True
 
+	try:
+		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
+	except Exception, e:
+		luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
 	return True
 
 def nodeTaskProcess(self, model, request):
@@ -5293,6 +5313,7 @@
 	batch_id = str(batchid)
 	objname = str(agent + '____flag')
 
+	objpath = ''
 	try:
 		clusterfolder = self.restrictedTraverse(path)
 		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/11/02 21:12:07	1.40
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/12/14 21:37:15	1.41
@@ -925,15 +925,15 @@
 		try:
 			return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
 		except Exception, e:
-			luci_log.debug_verbose('GCS0: %s: %s' % (clusterName, str(e)))
+			luci_log.debug_verbose('GCSy0: %s: %s' % (clusterName, str(e)))
 			return None
 
 	try:
 		i = getSecurityManager().getUser()
 		if not i:
-			raise Exception, 'GCSMGU failed'
+			raise Exception, 'security manager says no user'
 	except Exception, e:
-		luci_log.debug_verbose('GCS1: %s: %s' % (clusterName, str(e)))
+		luci_log.debug_verbose('GCSy1: %s: %s' % (clusterName, str(e)))
 		return None
 
 	try:
@@ -941,7 +941,7 @@
 		if not csystems or len(csystems) < 1:
 			return None
 	except Exception, e:
-		luci_log.debug_verbose('GCS2: %s: %s' % (clusterName, str(e)))
+		luci_log.debug_verbose('GCSy2: %s: %s' % (clusterName, str(e)))
 		return None
 
 	allowedCSystems = list()
@@ -950,7 +950,7 @@
 			if i.has_role('View', c[1]):
 				allowedCSystems.append(c)
 		except Exception, e:
-			luci_log.debug_verbose('GCS3: %s: %s: %s' \
+			luci_log.debug_verbose('GCSy3: %s: %s: %s' \
 				% (clusterName, c[0], str(e)))
 
 	return allowedCSystems
@@ -1356,17 +1356,25 @@
 		cluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
 		if not cluster:
 			raise Exception, 'cluster DB entry is missing'
-		csystems = getClusterSystems(self, clusterName)
-	except Exception, e:
-		luci_log.debug_verbose('delCluSysterms: error for %s: %s' \
-			% (clusterName, str(e)))
-		return 'Unable to find any systems for cluster %s' % clusterName
+
+		try:
+			csystems = getClusterSystems(self, clusterName)
+			if not csystems or len(csystems) < 1:
+				return None
+		except Exception, e:
+			luci_log.debug_verbose('delCluSystems0: %s' % str(e))
+			return None
+	except Exception, er:
+		luci_log.debug_verbose('delCluSystems1: error for %s: %s' \
+			% (clusterName, str(er)))
+		return str(er)
 
 	errors = ''
 	for i in csystems:
 		err = delClusterSystem(self, cluster, i[0])
 		if err:
 			errors += 'Unable to delete the cluster system %s: %s\n' % (i[0], err)
+			luci_log.debug_verbose('delCluSystems2: %s' % err)
 	return errors
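
Two patterns recur in this patch. First, cleanup of orphaned state: if a cluster's folder in the Luci database has no child objects left, the folder itself is now removed (the GRA2 and clusterDelete2 hunks; note that both new clusterDelete branches log the same 'clusterDelete2' tag, so grep hits on it are ambiguous). Second, log-prefix renaming (GCS -> GCSy in homebase_adapters.py) so each message maps to exactly one call site, since cluster_adapters.py also logs GCS-prefixed tags. The cleanup idiom, condensed from the patch using its own Zope calls, with error handling elided:

    def reap_empty_cluster_folder(self, clustername):
        folder = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH + clustername))
        if len(folder.objectItems()) < 1:
            clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
            clusters.manage_delObjects([clustername])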
 
 def getDefaultUser(self, request):



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-12-14 17:03 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-12-14 17:03 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-12-14 17:02:57

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix for the case in delete cluster where we delete the last remaining node

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.180&r2=1.181

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/11 22:42:34	1.180
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/14 17:02:56	1.181
@@ -26,7 +26,7 @@
 from clusterOS import resolveOSType
 from FenceHandler import FenceHandler, FENCE_OPTS
 from GeneralError import GeneralError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
+from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode, delCluster
 from LuciSyslog import LuciSyslog
 
 #Policy for showing the cluster chooser menu:
@@ -2807,14 +2807,20 @@
 		try:
 			rc = RicciCommunicator(nodename_resolved)
 		except Exception, e:
-			luci_log.debug_verbose('[%d] CStop0: RC %s: %s' \
+			luci_log.debug_verbose('CStop0: [%d] RC %s: %s' \
 				% (delete, nodename_resolved, str(e)))
 			errors += 1
 			continue
-		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('[%d] CStop1: nodeLeave %s' \
-				% (delete, nodename_resolved))
-			errors += 1
+
+		if delete is True:
+			if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
+				luci_log.debug_verbose('CStop1: nodeDelete failed')
+				errors += 1
+		else:
+			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
+				luci_log.debug_verbose('CStop2: nodeLeave %s' \
+					% (delete, nodename_resolved))
+				errors += 1
 	return errors
 
 def clusterRestart(self, model):
@@ -2827,7 +2833,18 @@
 	return snum_err + jnum_err
 
 def clusterDelete(self, model):
-	return clusterStop(self, model, delete=True)
+	if clusterStop(self, model, delete=True) < 1:
+		try:
+			clustername = model.getClusterName()
+		except Exception, e:
+			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+			return None
+
+		try:
+			delCluster(self, clustername)
+		except Exception, e:
+			luci_log.debug_verbose('clusterDelete0: %s: %s' \
+				% (clustername, str(e)))
 
 def forceNodeReboot(self, rc, clustername, nodename_resolved):
 	batch_number, result = nodeReboot(rc)
@@ -2908,75 +2925,89 @@
 	return True
 
 def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved):
-	#We need to get a node name other than the node
-	#to be deleted, then delete the node from the cluster.conf
-	#and propogate it. We will need two ricci agents for this task.
+	# We need to get a node name other than the node
+	# to be deleted, then delete the node from the cluster.conf
+	# and propogate it. We will need two ricci agents for this task,
+	# unless we are deleting the last remaining cluster node.
+
+	if len(model.getNodes()) == 1:
+		# If this is the last cluster node, we don't need a second
+		# node to propagate a new cluster.conf file. We need only to
+		# stop the final node and delete its cluster.conf file.
+		last_node = True
+	else:
+		# Make sure we can find a second node before we hose anything.
 
-	# Make sure we can find a second node before we hose anything.
-	path = str(CLUSTER_FOLDER_PATH + clustername)
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		if not clusterfolder:
-			raise Exception, 'no cluster folder at %s' % path
-	except Exception, e:
-		luci_log.debug_verbose('ND0: node delete error for cluster %s: %s' \
-				% (clustername, str(e)))
-		return None
+		last_node = False
+		found_one = False
 
-	try:
-		nodes = clusterfolder.objectItems('Folder')
-		if not nodes or len(nodes) < 1:
-			raise Exception, 'no cluster nodes in DB'
-	except Exception, e:
-		luci_log.debug_verbose('ND1: node delete error for cluster %s: %s' \
-			% (clustername, str(e)))
+		path = str(CLUSTER_FOLDER_PATH + clustername)
 
-	found_one = False
-	for node in nodes:
-		if node[1].getId().find(nodename) != (-1):
-			continue
-		#here we make certain the node is up...
-		# XXX- we should also make certain this host is still
-		# in the cluster we believe it is.
 		try:
-			rc2 = RicciCommunicator(node[1].getId())
+			clusterfolder = self.restrictedTraverse(path)
+			if not clusterfolder:
+				raise Exception, 'no cluster folder@%s' % path
 		except Exception, e:
-			luci_log.info('ND2: ricci %s error: %s' % (node[0], str(e)))
-			continue
+			luci_log.debug_verbose('ND0: node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
+			return None
 
-		if not rc2.authed():
-			try:
-				setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
+		try:
+			nodes = clusterfolder.objectItems('Folder')
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes in DB'
+		except Exception, e:
+			luci_log.debug_verbose('ND1: node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
+
+		for node in nodes:
+			if node[1].getId().find(nodename) != (-1):
+				continue
+			# here we make certain the node is up...
+			# XXX- we should also make certain this host is still
+			# in the cluster we believe it is.
 
 			try:
-				snode = getStorageNode(self, node[0])
-				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
+				rc2 = RicciCommunicator(node[1].getId())
+				if not rc2:
+					raise Exception, 'ND1a: rc2 is None'
+			except Exception, e:
+				luci_log.info('ND2: ricci %s error: %s' % (node[0], str(e)))
+				continue
 
-			luci_log.debug_verbose('ND3: %s is not authed' % node[0])
-			rc2 = None
-			continue
-		else:
-			found_one = True
-			break
+			if not rc2.authed():
+				try:
+					setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
 
-	if not found_one:
-		luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
-		return None
+				try:
+					snode = getStorageNode(self, node[0])
+					setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				luci_log.debug_verbose('ND3: %s is not authed' % node[0])
+				rc2 = None
+				continue
+			else:
+				found_one = True
+				break
+
+		if not found_one:
+			luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
+			return None
 
-	#First, delete cluster.conf from node to be deleted.
-	#next, have node leave cluster.
+	# First, delete cluster.conf from node to be deleted.
+	# next, have node leave cluster.
 	batch_number, result = nodeLeaveCluster(rc, purge=True)
 	if batch_number is None or result is None:
 		luci_log.debug_verbose('ND5: batch_number and/or result is None')
 		return None
 
-	#It is not worth flagging this node in DB, as we are going
-	#to delete it anyway. Now, we need to delete node from model
-	#and send out new cluster.conf
+	# It is not worth flagging this node in DB, as we are going
+	# to delete it anyway. Now, we need to delete node from model
+	# and send out new cluster.conf
 	delete_target = None
 	nodelist = model.getNodes()
 	find_node = lower(nodename)
@@ -2989,27 +3020,32 @@
 			continue
 
 	if delete_target is None:
-		luci_log.debug_verbose('ND6: unable to find delete target for %s in %s' \
+		luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
 			% (nodename, clustername))
 		return None
 
-	model.deleteNode(delete_target)
-
 	try:
-		str_buf = model.exportModelAsString()
-		if not str_buf:
-			raise Exception, 'model string is blank'
+		model.deleteNode(delete_target)
 	except Exception, e:
-		luci_log.debug_verbose('ND7: exportModelAsString: %s' % str(e))
-		return None
+		luci_log.debug_verbose('ND6a: deleteNode %s failed: %s' \
+			% (delete_target.getName(), str(e)))
 
-	# propagate the new cluster.conf via the second node
-	batch_number, result = setClusterConf(rc2, str(str_buf))
-	if batch_number is None:
-		luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
-		return None
+	if not last_node:
+		try:
+			str_buf = model.exportModelAsString()
+			if not str_buf:
+				raise Exception, 'model string is blank'
+		except Exception, e:
+			luci_log.debug_verbose('ND7: exportModelAsString: %s' % str(e))
+			return None
 
-	#Now we need to delete the node from the DB
+		# propagate the new cluster.conf via the second node
+		batch_number, result = setClusterConf(rc2, str(str_buf))
+		if batch_number is None:
+			luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
+			return None
+
+	# Now we need to delete the node from the DB
 	path = str(CLUSTER_FOLDER_PATH + clustername)
 	del_path = str(path + '/' + nodename_resolved)
 
@@ -3021,10 +3057,12 @@
 		luci_log.debug_verbose('ND9: error deleting %s: %s' \
 			% (del_path, str(e)))
 
-	try:
-		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
-	except Exception, e:
-		luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
+	if not last_node:
+		try:
+			set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
+		except Exception, e:
+			luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
+
 	return True
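
The reworked nodeDelete encodes the invariant that deleting a node normally needs two ricci agents -- one to make the doomed node leave and purge its cluster.conf, and one surviving member to propagate the shrunken configuration -- except when the doomed node is the last member, in which case there is nobody left to propagate to. A skeleton of that control flow (helper names from the patch; find_second_ricci_agent is a hypothetical stand-in for the folder-scanning loop):

    def node_delete_sketch(self, model, rc, nodename):
        last_node = len(model.getNodes()) == 1
        rc2 = None
        if not last_node:
            rc2 = find_second_ricci_agent(self)  # hypothetical helper
            if rc2 is None:
                return None  # refuse to proceed with no propagation path
        batch_number, result = nodeLeaveCluster(rc, purge=True)
        if batch_number is None or result is None:
            return None
        if not last_node:
            model.deleteNode(model.retrieveNodeByName(nodename))
            batch_number, result = setClusterConf(rc2, str(model.exportModelAsString()))
        return True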
 
 def nodeTaskProcess(self, model, request):



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-12-08 23:02 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-12-08 23:02 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-12-08 23:02:50

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	more fixes for add node

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.177&r2=1.178
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.47&r2=1.48

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/08 20:47:37	1.177
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/08 23:02:49	1.178
@@ -13,6 +13,7 @@
 from Clusterfs import Clusterfs
 from Fs import Fs
 from RefObject import RefObject
+from ClusterNode import ClusterNode
 from NFSClient import NFSClient
 from NFSExport import NFSExport
 from Service import Service
@@ -329,10 +330,10 @@
 	except Exception, e:
 		luci_log.debug_verbose('vACN2: %s: %s' % (clusterName, str(e)))
 		try:
-			rc = getRicciAgent(self, clusterName)
-			if not rc:
+			cluster_ricci = getRicciAgent(self, clusterName)
+			if not cluster_ricci:
 				raise Exception, 'cannot find a ricci agent for %s' % clusterName
-			cluster_os = getClusterOS(self, rc)['os']
+			cluster_os = getClusterOS(self, cluster_ricci)['os']
 			if clusterObj is None:
 				try:
 					clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
@@ -387,9 +388,64 @@
 	if not cluster_properties['isComplete']:
 		return (False, {'errors': errors, 'requestResults': cluster_properties})
 
+	try:
+		cluster_ricci = getRicciAgent(self, clusterName)
+		if not cluster_ricci:
+			raise Exception, 'Unable to get a ricci agent for %s' % clusterName
+	except Exception, e:
+		cluster_properties['isComplete'] = False
+		nodeUnauth(nodeList)
+		errors.append('Unable to contact a Ricci agent for %s.' % clusterName)
+		luci_log.debug_verbose('vACN6: ricci %s: %s' % (clusterName, str(e)))
+		return (False, {'errors': errors, 'requestResults': cluster_properties})
+
+	try:
+		model = getModelBuilder(None, cluster_ricci, cluster_ricci.dom0())
+		if not model:
+			raise Exception, 'unable to get model for %s' % clusterName
+		nodesptr = model.getClusterNodesPtr()
+		used_ids = {}
+		for i in model.getNodes():
+			try:
+				used_ids[int(i.getAttribute('nodeid'))] = 1
+			except Exception, e:
+				luci_log.debug_verbose('vACN7: %s' % str(e))
+				pass
+		next_node_id = 1;
+		for i in nodeList:
+			next_node_id += 1
+			new_node = ClusterNode()
+			new_node.attr_hash['name'] = i['host']
+			new_node.attr_hash['votes'] = str(1)
+			while next_node_id in used_ids:
+				next_node_id += 1
+			new_node.attr_hash['nodeid'] = str(next_node_id)
+			nodesptr.addChild(new_node)
+
+		model.isModified = True
+		conf_str = str(model.exportModelAsString())
+		if not conf_str:
+			raise Exception, 'unable to export model as a string'
+		batch_number, result = setClusterConf(cluster_ricci, conf_str)
+
+		while True:
+			batch_ret = checkBatch(cluster_ricci, batch_number)
+			code = batch_ret[0]
+			if code == True:
+				break
+			if code == -1:
+				errors.append(batch_ret[1])
+				raise Exception, batch_ret[1]
+			if code == False:
+				time.sleep(0.5)
+	except Exception, e:
+		luci_log.debug_verbose('vACN8: %s' % str(e))
+		errors.append('Unable to update the cluster node list for %s' % clusterName)
+		return (False, {'errors': errors, 'requestResults': cluster_properties})
+
 	error = createClusterSystems(self, clusterName, nodeList)
 	if error:
-		luci_log.debug_verbose('vACN5a: %s: %s' % (clusterName, str(e)))
+		luci_log.debug_verbose('vACN9: %s: %s' % (clusterName, str(e)))
 		nodeUnauth(nodeList)
 		cluster_properties['isComplete'] = False
 		errors.append(error)
@@ -405,7 +461,7 @@
 		except Exception, e:
 			nodeUnauth([clunode['host']])
 			success = False
-			luci_log.info('vACN6: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
+			luci_log.info('vACN10: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
 
 		if success:
 			try:
@@ -414,7 +470,7 @@
 			except Exception, e:
 				nodeUnauth([clunode['host']])
 				success = False
-				luci_log.info('vACN7: %s' % (clunode['host'], str(e)))
+				luci_log.info('vACN11: %s' % (clunode['host'], str(e)))
 
 		if not success:
 			cluster_properties['isComplete'] = False
@@ -4007,19 +4063,26 @@
         rc = RicciCommunicator(ricci[0])
       except Exception, e:
         rc = None
-        finished = False
+        finished = -1
+        err_msg = ''
         luci_log.debug_verbose('ICB15: ricci error: %s: %s' \
           % (ricci[0], str(e)))
 
       if rc is not None:
-        finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
-
-      if finished == True:
+        batch_res = checkBatch(rc, item[1].getProperty(BATCH_ID))
+        finished = batch_res[0]
+        err_msg = batch_res[1]
+
+      if finished == True or finished == -1:
+        if finished == -1:
+          flag_msg = err_msg
+        else:
+          flag_msg = ''
         flag_desc = item[1].getProperty(FLAG_DESC)
         if flag_desc is None:
-          node_report['desc'] = REDIRECT_MSG
+          node_report['desc'] = flag_msg + REDIRECT_MSG
         else:
-          node_report['desc'] = flag_desc + REDIRECT_MSG
+          node_report['desc'] = flag_msg + flag_desc + REDIRECT_MSG
         nodereports.append(node_report)
         try:
             clusterfolder.manage_delObjects(item[0])
@@ -5097,8 +5160,11 @@
 				pass
 			luci_log.info('NNFP2: %s not authenticated' % item[0])
 
-		finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
-		if finished == True:
+		batch_ret = checkBatch(rc, item[1].getProperty(BATCH_ID))
+		finished = batch_ret[0]
+		if finished == True or finished == -1:
+			if finished == -1:
+				luci_log.debug_verbose('NNFP2: batch error: %s' % batch_ret[1])
 			try:
 				nodefolder.manage_delObjects(item[0])
 			except Exception, e:
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/27 21:05:51	1.47
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/12/08 23:02:49	1.48
@@ -1,7 +1,7 @@
 import xml
 from time import time, ctime
 from xml.dom import minidom
-from ricci_communicator import RicciCommunicator
+from ricci_communicator import RicciCommunicator, extract_module_status
 from LuciSyslog import LuciSyslog
 
 try:
@@ -10,23 +10,33 @@
 	pass
 
 def checkBatch(rc, batch_id):
+	err_msg = 'An unknown Ricci error occurred on %s' % rc.hostname()
+
 	try:
 		batch = rc.batch_report(batch_id)
 		if batch is None:
-			return True
+			return (True, 'batch id was not found')
 	except:
-		return False
+		return (-1, err_msg)
 
 	try:
-		dummy = batch.getAttribute('batch_id')
-		result = batch.getAttribute('status')
+		code, new_err_msg = extract_module_status(batch, 1)
+		if new_err_msg:
+			err_msg = 'A Ricci error occurred on %s: %s' \
+				% (rc.hostname(), str(new_err_msg))
 	except:
-		return False
+		return (-1, err_msg)
 
-	if result == '0':
-		return True
+	# In progress.
+	if code == -101 or code == -102:
+		return (False, 'in progress')
+
+	# Done successfully.
+	if code == '0':
+		return (True, 'completed sucessfully')
 
-	return False
+	# Error
+	return (-1, err_msg)
 
 def addClusterNodeBatch(os_str,
 						cluster_name,
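
The key interface change is in ricci_bridge.py: checkBatch used to collapse everything into True/False, so a failed batch was indistinguishable from one still in progress and callers could poll forever. It now returns a (code, message) pair -- True for done, False for in progress, -1 for failed -- and the add-node path polls it (the `while True` loop in the first hunk). A condensed version of that polling loop:

    import time

    def wait_for_batch(rc, batch_number):
        while True:
            code, err_msg = checkBatch(rc, batch_number)
            if code == True:       # completed successfully
                return
            if code == -1:         # ricci reported an error
                raise Exception(err_msg)
            time.sleep(0.5)        # False: still in progress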



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-30 20:12 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-30 20:12 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	jparsons at sourceware.org	2006-11-30 20:12:38

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix for quorumd cluster props, part of bug #212021

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.11&r2=1.120.2.12

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/20 23:32:43	1.120.2.11
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/30 20:12:38	1.120.2.12
@@ -18,10 +18,11 @@
 from Vm import Vm
 from Script import Script
 from Samba import Samba
+from QuorumD import QuorumD
+from Heuristic import Heuristic
 from clusterOS import resolveOSType
 from FenceHandler import FenceHandler, FENCE_OPTS
 from GeneralError import GeneralError
-from UnknownClusterError import UnknownClusterError
 from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
 from LuciSyslog import LuciSyslog
 
@@ -588,8 +589,9 @@
 	except ValueError, e:
 		errors.append('An invalid Minimum Score value was given: %s' % str(e))
 
-  device = None
-  label = None  #Either device or label must be present
+	#Either device or label must be present
+	device = None
+	label = None
 	try:
 		device = form['device'].strip()
 		label = form['label'].strip()
@@ -652,26 +654,31 @@
 	if len(errors) > 0:
 		return (False, {'errors': errors })
 
-  qd = QuorumD()
-  qd.addAttribute('interval', interval)
-  qd.addAttribute('votes', votes)
-  qd.addAttribute('tko', tko)
-  qd.addAttribute('min_score', min_score)
-  if device:
-    qd.addAttribute('device', device)
-  else:
-    qd.addAttribute('label', label)
-  cp = model.getClusterPtr()
-  cp.addChild(qd)
+	qd = QuorumD()
+	qd.addAttribute('interval', interval)
+	qd.addAttribute('votes', votes)
+	qd.addAttribute('tko', tko)
+	qd.addAttribute('min_score', min_score)
+
+	if device:
+		qd.addAttribute('device', device)
+	else:
+		qd.addAttribute('label', label)
 
-  for h in heuristics:
-    new_h = Heuristic()
-    new_h.addAttribute('program', h[1])
-    new_h.addAttribute('interval', h[2])
-    new_h.addAttribute('score', h[3])
-    qd.addChild(new_h)
+	cp = model.getClusterPtr()
+	cp.addChild(qd)
 
-	return (True, {'messages': ['Changes accepted. - FILL ME IN']})
+	for h in heuristics:
+		new_h = Heuristic()
+		new_h.addAttribute('program', h[1])
+		new_h.addAttribute('interval', h[2])
+		new_h.addAttribute('score', h[3])
+		qd.addChild(new_h)
+
+	if len(errors) > 0:
+		return (False, {'errors': errors })
+
+	return (True, {})
 
 def validateGeneralConfig(model, form):
 	errors = list()
@@ -1018,7 +1025,7 @@
 
   try:
     pagetype = request[PAGETYPE]
-  except KeyError, e:
+  except:
     pagetype = '3'
 
 
@@ -1113,7 +1120,7 @@
 
   try:
     url = request['URL']
-  except KeyError, e:
+  except:
     url = "/luci/cluster/index_html"
 
   #The only way this method can run is if there exists
@@ -2811,8 +2818,13 @@
       svclist.append(thing)
 
   #Get cluster name and node name from request
-  clustername = request['clustername']
-  nodename = request['nodename']
+  try:
+    clustername = request['clustername']
+    nodename = request['nodename']
+  except Exception, e:
+    luci_log.debug_verbose('getNodeInfo0: %s' % str(e))
+    return {}
+
   #extract correct node line from cluster status
   found = False
   for item in status:
@@ -2820,7 +2832,9 @@
       found = True
       break
   if found == False:
-    raise UnknownClusterError("Fatal", "Unable to resolve node name in cluster status")
+    luci_log.debug_verbose('getNodeInfo1: Unable to resolve node name in cluster status')
+    return {}
+
   #Now determine state of node...
   if item['online'] == "false":
     nodestate = NODE_UNKNOWN
@@ -3047,8 +3061,21 @@
   raise
   
 def getFenceInfo(self, model, request):
-  clustername = request['clustername']
-  baseurl = request['URL']
+  try:
+    clustername = request['clustername']
+  except:
+    try:
+      clustername = request.form['clustername']
+    except:
+      luci_log.debug_verbose('getFenceInfo0: unable to determine cluster name')
+      return {}
+
+  try:
+    baseurl = request['URL']
+  except Exception, e:
+    luci_log.debug_verbose('getFenceInfo1: no request.URL')
+    return {}
+
   map = {}
   level1 = list() #First level fence devices
   level2 = list() #Second level fence devices
@@ -3064,8 +3091,13 @@
 
   try:
     nodename = request['nodename']
-  except KeyError, e:
-    raise GeneralError('FATAL', "Could not extract nodename from request")
+  except:
+    try:
+      nodename = request.form['nodename']
+    except:
+      luci_log.debug_verbose('getFenceInfo2: unable to extract nodename: %s' \
+          % str(e))
+      return {}
     
   #Here we need to get fences for a node - just the first two levels
   #Each level has its own list of fence devs used in that level
@@ -3075,7 +3107,8 @@
   try:
     node = model.retrieveNodeByName(nodename)
   except GeneralError, e:
-    raise GeneralError('FATAL', "Couldn't find node name in current node list")
+    luci_log.debug_verbose('getFenceInfo3: unabel to find node name %s in current node list' % (str(nodename), str(e)))
+    return {}
 
   fds = model.getFenceDevices()
 
@@ -3160,9 +3193,9 @@
             level1.append(fencedev)
             last_kid_fd = fd
             continue
+    map['level1'] = level1
 
     #level1 list is complete now, but it is still necessary to build shared1
-    sharednames = list()
     for fd in fds:
       isUnique = True
       if fd.isShared() == False:
@@ -3178,6 +3211,7 @@
         shared_struct['agent'] = agentname
         shared_struct['prettyname'] = FENCE_OPTS[agentname]
         shared1.append(shared_struct)
+    map['shared1'] = shared1
 
   #YUK: This next section violates the DRY rule, :-(
   if len_levels >= 2:
@@ -3254,9 +3288,9 @@
             level2.append(fencedev)
             last_kid_fd = fd
             continue
+    map['level2'] = level2
 
     #level2 list is complete but like above, we need to build shared2
-    sharednames = list()
     for fd in fds:
       isUnique = True
       if fd.isShared() == False:
@@ -3272,6 +3306,7 @@
         shared_struct['agent'] = agentname
         shared_struct['prettyname'] = FENCE_OPTS[agentname]
         shared2.append(shared_struct)
+    map['shared2'] = shared2
 
   return map    
       
@@ -3280,7 +3315,7 @@
   baseurl = request['URL']
   map = {}
   fencedevs = list() #This is for the fencedev list page
-  map['fencedevs'] = fencedevs
+
   #Get list of fence devices
   fds = model.getFenceDevices()
   nodes_used = list() #This section determines which nodes use the dev
@@ -3296,6 +3331,7 @@
         fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
       except:
         fencedev['pretty_name'] = fd.getAgentType()
+      fencedev['agent'] = fd.getAgentType()
       #Add config url for this fencedev
       fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV
 
@@ -3320,7 +3356,8 @@
 
       fencedev['nodesused'] = nodes_used
       fencedevs.append(fencedev)
-    
+
+  map['fencedevs'] = fencedevs
   return map
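
The substance of this fix is re-indentation: the quorumd block had been committed with two-space indents inside tab-indented functions, which in Python changes (or breaks) the block structure -- the same root cause as the 2006-11-27 18:15 trunk commit below. With the indentation repaired, the code builds one QuorumD child under the cluster element plus one Heuristic child per row. The shape of that subtree, using the model classes from the patch with illustrative values:

    qd = QuorumD()
    qd.addAttribute('interval', '1')
    qd.addAttribute('votes', '3')
    qd.addAttribute('tko', '10')
    qd.addAttribute('min_score', '3')
    qd.addAttribute('label', 'qdisk')  # exactly one of label/device is set
    cp = model.getClusterPtr()
    cp.addChild(qd)
    for h in [(None, '/bin/ping -c1 10.0.0.1', '2', '1')]:  # (_, program, interval, score)
        new_h = Heuristic()
        new_h.addAttribute('program', h[1])
        new_h.addAttribute('interval', h[2])
        new_h.addAttribute('score', h[3])
        qd.addChild(new_h)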
 
     



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-27 21:06 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-27 21:06 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-27 21:06:53

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	get rid of import of stale, deleted class

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.168&r2=1.169

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/27 21:05:51	1.168
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/27 21:06:53	1.169
@@ -23,7 +23,6 @@
 from clusterOS import resolveOSType
 from FenceHandler import FenceHandler, FENCE_OPTS
 from GeneralError import GeneralError
-from UnknownClusterError import UnknownClusterError
 from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
 from LuciSyslog import LuciSyslog
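
Removing the import is not just tidiness: UnknownClusterError.py itself was deleted in the immediately preceding commit (see the "Removed files" list in the 21:05 entry below), so a stale `from UnknownClusterError import ...` would raise ImportError when Zope loads the module and take every adapter in the file down with it.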
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-27 21:05 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-27 21:05 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-27 21:05:51

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 
Removed files:
	luci/site/luci/Extensions: RicciReceiveError.py 
	                           UnknownClusterError.py 

Log message:
	cleanups

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.167&r2=1.168
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.46&r2=1.47
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciReceiveError.py.diff?cvsroot=cluster&r1=1.1&r2=NONE
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/UnknownClusterError.py.diff?cvsroot=cluster&r1=1.1&r2=NONE

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/27 19:11:41	1.167
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/27 21:05:51	1.168
@@ -1026,7 +1026,7 @@
 
   try:
     pagetype = request[PAGETYPE]
-  except KeyError, e:
+  except:
     pagetype = '3'
 
 
@@ -1121,7 +1121,7 @@
 
   try:
     url = request['URL']
-  except KeyError, e:
+  except:
     url = "/luci/cluster/index_html"
 
   #The only way this method can run is if there exists
@@ -2819,8 +2819,13 @@
       svclist.append(thing)
 
   #Get cluster name and node name from request
-  clustername = request['clustername']
-  nodename = request['nodename']
+  try:
+    clustername = request['clustername']
+    nodename = request['nodename']
+  except Exception, e:
+    luci_log.debug_verbose('getNodeInfo0: %s' % str(e))
+    return {}
+
   #extract correct node line from cluster status
   found = False
   for item in status:
@@ -2828,7 +2833,9 @@
       found = True
       break
   if found == False:
-    raise UnknownClusterError("Fatal", "Unable to resolve node name in cluster status")
+    luci_log.debug_verbose('getNodeInfo1: Unable to resolve node name in cluster status')
+    return {}
+
   #Now determine state of node...
   if item['online'] == "false":
     nodestate = NODE_UNKNOWN
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/20 23:30:17	1.46
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/27 21:05:51	1.47
@@ -63,7 +63,7 @@
 	batch += '</function_call>'
 	batch += '</request>'
 	batch += '</module>'
-		
+
 	need_reboot = install_base or install_services or install_shared_storage or install_LVS
 	if need_reboot:
 		batch += '<module name="service">'
@@ -96,7 +96,7 @@
 		batch += '<function_call name="install"/>'
 		batch += '</request>'
 		batch += '</module>'
-		
+
 	batch += '<module name="cluster">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="set_cluster.conf">'
@@ -236,7 +236,7 @@
 		batch += '<cman expected_votes="1" two_node="1"/>'
 	else:
 		batch += '<cman/>'
- 
+
 	batch += '<fencedevices/>'
 	batch += '<rm/>'
 	batch += '</cluster>'
@@ -298,66 +298,39 @@
 		pass
 
 	return (None, None)
-	
 
-def getPayload(bt_node):
-	if not bt_node:
-		return None
-
-	mod_node = None
-	for node in bt_node.childNodes:
-		if node.nodeType == xml.dom.Node.ELEMENT_NODE and node.nodeName == 'module':
-			mod_node = node
-	if not mod_node:
-		return None
-
-	resp_node = None
-	for node in mod_node.childNodes:
-		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-			resp_node = node
-	if not resp_node:
-		return None
+def getClusterStatusBatch(rc):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
+	ricci_xml = rc.batch_run(batch_str, async=False)
 
-	fr_node = None
-	for node in resp_node.childNodes:
-		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-			fr_node = node
-	if not fr_node:
+	try:
+		cluster_tags = ricci_xml.getElementsByTagName('cluster')
+	except Exception, e:
+		luci_log.debug_verbose('getClusterStatusBatch0: %s' % str(e))
 		return None
 
-	varnode = None
-	for node in fr_node.childNodes:
-		if node.nodeName == 'var':
-			varnode = node
-			break
-	if not varnode:
-		return None
+	if len(cluster_tags) < 1:
+		luci_log.debug_verbose('getClusterStatusBatch1: %d entries - expecting 1' \
+			% len(cluster_tags))
+	elif len(cluster_tags) > 1:
+		luci_log.debug_verbose('getClusterStatusBatch2: %d entries - expecting 1, using the first' % len(cluster_tags))
 
-	cl_node = None
-	for node in varnode.childNodes:
-		if node.nodeName == 'cluster':
-			cl_node = node
-			break
-	if not cl_node:
+	try:
+		cluster_node = cluster_tags[0]
+		if not cluster_node:
+			raise Exception, 'element 0 is None'
+	except Exception, e:
+		luci_log.debug_verbose('getClusterStatusBatch3: %s' % str(e))
 		return None
 
-	doc = minidom.Document()
-	doc.appendChild(cl_node)
-	return doc
-
-def getClusterStatusBatch(rc):
-	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
-	ricci_xml = rc.batch_run(batch_str, async=False)
-
-	if not ricci_xml or not ricci_xml.firstChild:
-		luci_log.debug_verbose('ricci_xml is None from batch_run')
-		
-	doc = getPayload(ricci_xml.firstChild)
-	if not doc or not doc.firstChild:
-		luci_log.debug_verbose('doc is None from getPayload: %s' % ricci_xml.toxml())
-		return None
+	try:
+		doc = minidom.Document()
+		doc.appendChild(cluster_node)
+		return doc
+	except Exception, e:
+		luci_log.debug_verbose('getClusterStatusBatch4: %s' % str(e))
 
-	return doc
+	return None
 
 def setClusterConf(rc, clusterconf, propagate=True):
 	if propagate == True:
@@ -490,7 +463,7 @@
 		return None, None
 	ricci_xml = rc.batch_run(batch)
 	return batchAttemptResult(ricci_xml)
-	
+
 def restartService(rc, servicename):
 	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
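
Side note on the refactor above: the deleted getPayload() walked the
module/response/function_response/var chain by hand, while the new
getClusterStatusBatch() lets minidom locate the <cluster> element wherever
it sits and re-roots it in a fresh document. A self-contained sketch of
that extraction pattern (the sample XML is illustrative, not a real ricci
response):

    from xml.dom import minidom

    sample = (
        '<ricci><module name="cluster"><response>'
        '<function_response><var name="cluster_report">'
        '<cluster name="demo" votes="2"/>'
        '</var></function_response></response></module></ricci>'
    )

    def extract_cluster_doc(ricci_xml):
        # Find the <cluster> element directly instead of walking the tree.
        tags = ricci_xml.getElementsByTagName('cluster')
        if len(tags) < 1:
            return None
        # Re-root it in a fresh document, as the patch does; minidom's
        # appendChild detaches the node from its old parent first.
        doc = minidom.Document()
        doc.appendChild(tags[0])
        return doc

    doc = extract_cluster_doc(minidom.parseString(sample))
    print doc.toxml()   # <?xml version="1.0" ?><cluster name="demo" votes="2"/>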
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-27 18:15 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-27 18:15 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-27 18:15:31

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix indentation and import errors

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.165&r2=1.166

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/20 15:05:00	1.165
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/27 18:15:31	1.166
@@ -18,6 +18,8 @@
 from Vm import Vm
 from Script import Script
 from Samba import Samba
+from QuorumD import QuorumD
+from Heuristic import Heuristic
 from clusterOS import resolveOSType
 from FenceHandler import FenceHandler, FENCE_OPTS
 from GeneralError import GeneralError
@@ -588,8 +590,9 @@
 	except ValueError, e:
 		errors.append('An invalid Minimum Score value was given: %s' % str(e))
 
-  device = None
-  label = None  #Either device or label must be present
+	#Either device or label must be present
+	device = None
+	label = None
 	try:
 		device = form['device'].strip()
 		label = form['label'].strip()
@@ -652,26 +655,31 @@
 	if len(errors) > 0:
 		return (False, {'errors': errors })
 
-  qd = QuorumD()
-  qd.addAttribute('interval', interval)
-  qd.addAttribute('votes', votes)
-  qd.addAttribute('tko', tko)
-  qd.addAttribute('min_score', min_score)
-  if device:
-    qd.addAttribute('device', device)
-  else:
-    qd.addAttribute('label', label)
-  cp = model.getClusterPtr()
-  cp.addChild(qd)
+	qd = QuorumD()
+	qd.addAttribute('interval', interval)
+	qd.addAttribute('votes', votes)
+	qd.addAttribute('tko', tko)
+	qd.addAttribute('min_score', min_score)
+
+	if device:
+		qd.addAttribute('device', device)
+	else:
+		qd.addAttribute('label', label)
+
+	cp = model.getClusterPtr()
+	cp.addChild(qd)
 
-  for h in heuristics:
-    new_h = Heuristic()
-    new_h.addAttribute('program', h[1])
-    new_h.addAttribute('interval', h[2])
-    new_h.addAttribute('score', h[3])
-    qd.addChild(new_h)
+	for h in heuristics:
+		new_h = Heuristic()
+		new_h.addAttribute('program', h[1])
+		new_h.addAttribute('interval', h[2])
+		new_h.addAttribute('score', h[3])
+		qd.addChild(new_h)
 
-	return (True, {'messages': ['Changes accepted. - FILL ME IN']})
+	if len(errors) > 0:
+		return (False, {'errors': errors })
+
+	return (True, {})
 
 def validateGeneralConfig(model, form):
 	errors = list()
@@ -3047,8 +3055,13 @@
   raise
   
 def getFenceInfo(self, model, request):
-  clustername = request['clustername']
-  baseurl = request['URL']
+  try:
+    clustername = request['clustername']
+    baseurl = request['URL']
+  except Exception, e:
+    luci_log.debug_verbose('getFenceInfo0: error: %s' % str(e))
+    return {}
+
   map = {}
   level1 = list() #First level fence devices
   level2 = list() #Second level fence devices
@@ -3065,7 +3078,9 @@
   try:
     nodename = request['nodename']
   except KeyError, e:
-    raise GeneralError('FATAL', "Could not extract nodename from request")
+    luci_log.debug_verbose('getFenceInfo1: unable to extract nodename: %s' \
+        % str(e))
+    return {}
     
   #Here we need to get fences for a node - just the first two levels
   #Each level has its own list of fence devs used in that level
@@ -3075,7 +3090,8 @@
   try:
     node = model.retrieveNodeByName(nodename)
   except GeneralError, e:
-    raise GeneralError('FATAL', "Couldn't find node name in current node list")
+    luci_log.debug_verbose('getFenceInfo2: unable to find node name %s in current node list: %s' % (str(nodename), str(e)))
+    return {}
 
   fds = model.getFenceDevices()
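
For readers unfamiliar with the model code being reindented above: QuorumD
and Heuristic come from the conga Extensions package, and the
addAttribute()/addChild() calls assemble the <quorumd>/<heuristic> subtree
of cluster.conf. The stand-in class below is hypothetical (only the call
shape matches the patch; all values are placeholders):

    class TagStandIn:
        # Hypothetical stand-in for the QuorumD/Heuristic model classes.
        def __init__(self, tag):
            self.tag = tag
            self.attrs = []
            self.children = []
        def addAttribute(self, name, value):
            self.attrs.append((name, str(value)))
        def addChild(self, child):
            self.children.append(child)
        def toxml(self):
            attrs = ''.join([' %s="%s"' % a for a in self.attrs])
            kids = ''.join([c.toxml() for c in self.children])
            return '<%s%s>%s</%s>' % (self.tag, attrs, kids, self.tag)

    qd = TagStandIn('quorumd')
    qd.addAttribute('interval', 1)
    qd.addAttribute('min_score', 1)
    qd.addAttribute('device', '/dev/sdb1')    # placeholder
    h = TagStandIn('heuristic')
    h.addAttribute('program', 'ping -c1 gw')  # placeholder
    qd.addChild(h)
    print qd.toxml()
    # <quorumd interval="1" min_score="1" device="/dev/sdb1"><heuristic program="ping -c1 gw"></heuristic></quorumd>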
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-20 23:32 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-20 23:32 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2006-11-20 23:32:43

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           ricci_communicator.py 

Log message:
	the domU code in ricci_communicator isn't needed anymore

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.10&r2=1.120.2.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.5&r2=1.9.2.6

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/17 05:50:09	1.120.2.10
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/20 23:32:43	1.120.2.11
@@ -588,19 +588,15 @@
 	except ValueError, e:
 		errors.append('An invalid Minimum Score value was given: %s' % str(e))
 
+  device = None
+  label = None  #Either device or label must be present
 	try:
 		device = form['device'].strip()
-		if not device:
-			raise KeyError, 'device is none'
-	except KeyError, e:
-		errors.append('No Device value was given')
-
-	try:
 		label = form['label'].strip()
-		if not label:
-			raise KeyError, 'label is none'
+		if not device and not label:
+			raise KeyError, 'device and label are both none'
 	except KeyError, e:
-		errors.append('No Label value was given')
+		errors.append('No Device or Label value was given')
 
 	num_heuristics = 0
 	try:
@@ -655,6 +651,26 @@
 
 	if len(errors) > 0:
 		return (False, {'errors': errors })
+
+  qd = QuorumD()
+  qd.addAttribute('interval', interval)
+  qd.addAttribute('votes', votes)
+  qd.addAttribute('tko', tko)
+  qd.addAttribute('min_score', min_score)
+  if device:
+    qd.addAttribute('device', device)
+  else:
+    qd.addAttribute('label', label)
+  cp = model.getClusterPtr()
+  cp.addChild(qd)
+
+  for h in heuristics:
+    new_h = Heuristic()
+    new_h.addAttribute('program', h[1])
+    new_h.addAttribute('interval', h[2])
+    new_h.addAttribute('score', h[3])
+    qd.addChild(new_h)
+
 	return (True, {'messages': ['Changes accepted. - FILL ME IN']})
 
 def validateGeneralConfig(model, form):
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/20 22:12:06	1.9.2.5
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/20 23:32:43	1.9.2.6
@@ -62,7 +62,6 @@
         self.__reported_hostname = hello.firstChild.getAttribute('hostname')
         self.__os = hello.firstChild.getAttribute('os')
         self.__dom0 = hello.firstChild.getAttribute('xen_host') == 'true'
-        self.__domU = hello.firstChild.getAttribute('xen_guest') == 'true'
 
         pass
     
@@ -91,11 +90,6 @@
         luci_log.debug_verbose('RC:dom0: [auth %d] reported system_name = %s for %s' \
             % (self.__authed, self.__dom0, self.__hostname))
         return self.__dom0
-    def domU(self):
-        luci_log.debug_verbose('RC:domU: [auth %d] reported system_name = %s for %s' \
-            % (self.__authed, self.__domU, self.__hostname))
-        return self.__domU
-    
     
     def auth(self, password):
         if self.authed():
@@ -364,8 +358,6 @@
     return ricci.os()
 def ricci_get_dom0(self, ricci):
     return ricci.dom0()
-def ricci_get_domU(self, ricci):
-    return ricci.domU()
 def ricci_get_cluster_info(self, ricci):
     return ricci.cluster_info()
 def ricci_get_authenticated(self, ricci):
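
For context on what is being dropped above: the ricci hello message carries
host facts as attributes on its root element, and RicciCommunicator caches
them at connect time. A minimal sketch of the parsing (the sample hello
below is illustrative; the real one arrives over the authenticated ricci
socket, and xen_guest is the attribute this patch stops reading):

    from xml.dom import minidom

    hello = minidom.parseString(
        '<ricci version="1.0" hostname="node1.example.com" '
        'os="rhel5" xen_host="true"/>'
    )
    dom0 = hello.firstChild.getAttribute('xen_host') == 'true'
    print dom0   # True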



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-20 15:05 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-20 15:05 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-11-20 15:05:00

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix for quorumd cluster props, part of bug #212021

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.164&r2=1.165

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/17 05:48:27	1.164
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/20 15:05:00	1.165
@@ -588,19 +588,15 @@
 	except ValueError, e:
 		errors.append('An invalid Minimum Score value was given: %s' % str(e))
 
+  device = None
+  label = None  #Either device or label must be present
 	try:
 		device = form['device'].strip()
-		if not device:
-			raise KeyError, 'device is none'
-	except KeyError, e:
-		errors.append('No Device value was given')
-
-	try:
 		label = form['label'].strip()
-		if not label:
-			raise KeyError, 'label is none'
+		if not device and not label:
+			raise KeyError, 'device and label are both none'
 	except KeyError, e:
-		errors.append('No Label value was given')
+		errors.append('No Device or Label value was given')
 
 	num_heuristics = 0
 	try:
@@ -655,6 +651,26 @@
 
 	if len(errors) > 0:
 		return (False, {'errors': errors })
+
+  qd = QuorumD()
+  qd.addAttribute('interval', interval)
+  qd.addAttribute('votes', votes)
+  qd.addAttribute('tko', tko)
+  qd.addAttribute('min_score', min_score)
+  if device:
+    qd.addAttribute('device', device)
+  else:
+    qd.addAttribute('label', label)
+  cp = model.getClusterPtr()
+  cp.addChild(qd)
+
+  for h in heuristics:
+    new_h = Heuristic()
+    new_h.addAttribute('program', h[1])
+    new_h.addAttribute('interval', h[2])
+    new_h.addAttribute('score', h[3])
+    qd.addChild(new_h)
+
 	return (True, {'messages': ['Changes accepted. - FILL ME IN']})
 
 def validateGeneralConfig(model, form):
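
The device/label hunk above collapses two independent required-field checks
into a single either-or rule: a quorum disk needs a device path or a label,
not both. A standalone sketch of the new rule (a plain dict stands in for
the Zope request form):

    def check_quorumd_target(form):
        # At least one of 'device' or 'label' must be non-empty,
        # matching the validation introduced above.
        errors = []
        device = None
        label = None
        try:
            device = form['device'].strip()
            label = form['label'].strip()
            if not device and not label:
                raise KeyError, 'device and label are both none'
        except KeyError, e:
            errors.append('No Device or Label value was given')
        return device, label, errors

    print check_quorumd_target({'device': '/dev/sdb1', 'label': ''})
    # ('/dev/sdb1', '', [])
    print check_quorumd_target({'device': '', 'label': ''})
    # ('', '', ['No Device or Label value was given'])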



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-17  5:50 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-17  5:50 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2006-11-17 05:50:10

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix fence bug

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.9&r2=1.120.2.10

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/16 19:34:53	1.120.2.9
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/17 05:50:09	1.120.2.10
@@ -3246,7 +3246,7 @@
       if fd.isShared() == False:
         continue
       for fdev in level2:
-        if fd.getName.strip() == fdev['name']:
+        if fd.getName().strip() == fdev['name']:
           isUnique = False
           break
       if isUnique == True:
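
The one-character fix above deserves a note: without the call parentheses,
.strip() is looked up on the bound method object rather than on the string
it returns, so the comparison raises instead of matching. A toy
reproduction (FenceDev is a made-up stand-in):

    class FenceDev:
        def getName(self):
            return ' apc1 '

    fd = FenceDev()
    print fd.getName().strip()    # 'apc1' - call first, then strip
    try:
        fd.getName.strip()        # method object has no .strip attribute
    except AttributeError, e:
        print 'bug reproduced: %s' % str(e)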



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-17  5:48 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-17  5:48 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-17 05:48:27

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix fence bug

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.163&r2=1.164

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/13 21:40:55	1.163
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/17 05:48:27	1.164
@@ -3246,7 +3246,7 @@
       if fd.isShared() == False:
         continue
       for fdev in level2:
-        if fd.getName.strip() == fdev['name']:
+        if fd.getName().strip() == fdev['name']:
           isUnique = False
           break
       if isUnique == True:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-10 18:18 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-10 18:18 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-10 18:18:09

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	better debugging output

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.159&r2=1.160

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/10 17:59:58	1.159
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/10 18:18:09	1.160
@@ -1505,15 +1505,15 @@
 	try:
 		clusterfolder = self.restrictedTraverse(path)
 		if not clusterfolder:
-			luci_log.debug('GRA: cluster folder %s for %s is missing.' \
+			luci_log.debug('GRA0: cluster folder %s for %s is missing.' \
 				% (path, clustername))
 			raise Exception, 'no cluster folder at %s' % path
 		nodes = clusterfolder.objectItems('Folder')
 		if len(nodes) < 1:
-			luci_log.debug('GRA: no cluster nodes for %s found.' % clustername)
+			luci_log.debug('GRA1: no cluster nodes for %s found.' % clustername)
 			raise Exception, 'no cluster nodes were found at %s' % path
 	except Exception, e:
-		luci_log.debug('GRA: cluster folder %s for %s is missing: %s.' \
+		luci_log.debug('GRA2: cluster folder %s for %s is missing: %s.' \
 			% (path, clustername, str(e)))
 		return None
 
@@ -1531,17 +1531,17 @@
 		try:
 			rc = RicciCommunicator(hostname)
 		except RicciError, e:
-			luci_log.debug('GRA: ricci error: %s' % str(e))
+			luci_log.debug('GRA3: ricci error: %s' % str(e))
 			continue
 
 		try:
 			clu_info = rc.cluster_info()
 		except Exception, e:
-			luci_log.debug('GRA: cluster_info error: %s' % str(e))
+			luci_log.debug('GRA4: cluster_info error: %s' % str(e))
 
 		if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
 			try:
-				luci_log.debug('GRA: %s reports it\'s in cluster %s:%s; we expect %s' \
+				luci_log.debug('GRA5: %s reports it\'s in cluster %s:%s; we expect %s' \
 					 % (hostname, clu_info[0], clu_info[1], cluname))
 				setNodeFlag(self, node, CLUSTER_NODE_NOT_MEMBER)
 			except:
@@ -1555,20 +1555,28 @@
 		except:
 			pass
 
-	luci_log.debug('GRA: no ricci agent could be found for cluster %s' % cluname)
+	luci_log.debug('GRA6: no ricci agent could be found for cluster %s' \
+		% cluname)
 	return None
 
 def getRicciAgentForCluster(self, req):
+	clustername = None
 	try:
 		clustername = req['clustername']
-	except KeyError, e:
+		if not clustername:
+			clustername = None
+			raise
+	except:
 		try:
 			clustername = req.form['clusterName']
 			if not clustername:
-				raise
+				clustername = None
 		except:
-			luci_log.debug('no cluster name was specified in getRicciAgentForCluster')
-			return None
+			pass
+
+	if clustername is None:
+		luci_log.debug('GRAFC0: no cluster name was found')
+		return None
 	return getRicciAgent(self, clustername)
 
 def getClusterStatus(self, rc):
@@ -4322,7 +4330,7 @@
 		if not mb_nodes or not len(mb_nodes):
 			raise Exception, 'node list is empty'
 	except Exception, e:
-		luci_log.debug_verbose('no model builder nodes found for %s: %s' \
+		luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %s' \
+				% (clusterName, str(e)))
 		return 'Unable to find cluster nodes for %s' % clusterName
 
@@ -4331,17 +4339,18 @@
 		if not cluster_node:
 			raise Exception, 'cluster node is none'
 	except Exception, e:
-		luci_log.debug('cant find cluster node for %s: %s'
+		luci_log.debug('RCC1: cant find cluster node for %s: %s'
 			% (clusterName, str(e)))
 		return 'Unable to find an entry for %s in the Luci database.' % clusterName
 
 	try:
 		db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
 		if not db_nodes or not len(db_nodes):
-			raise
-	except:
+			raise Exception, 'no database nodes'
+	except Exception, e:
 		# Should we just create them all? Can this even happen?
-		return 'Unable to find database entries for any nodes in ' + clusterName
+		luci_log.debug('RCC2: error: %s' % str(e))
+		return 'Unable to find database entries for any nodes in %s' % clusterName
 
 	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
 
@@ -4368,11 +4377,15 @@
 
 	messages = list()
 	for i in missing_list:
-		cluster_node.delObjects([i])
-		## or alternately
-		#new_node = cluster_node.restrictedTraverse(i)
-		#setNodeFlag(self, new_node, CLUSTER_NODE_NOT_MEMBER)
-		messages.append('Node \"' + i + '\" is no longer in a member of cluster \"' + clusterName + '.\". It has been deleted from the management interface for this cluster.')
+		try:
+			## or alternately
+			##new_node = cluster_node.restrictedTraverse(i)
+			##setNodeFlag(self, new_node, CLUSTER_NODE_NOT_MEMBER)
+			cluster_node.delObjects([i])
+			messages.append('Node \"%s\" is no longer a member of cluster \"%s.\" It has been deleted from the management interface for this cluster.' % (i, clusterName))
+			luci_log.debug_verbose('VCC3: deleted node %s' % i)
+		except Exception, e:
+			luci_log.debug_verbose('VCC4: delObjects: %s: %s' % (i, str(e)))
 
 	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
 	for i in new_list:
@@ -4380,9 +4393,11 @@
 			cluster_node.manage_addFolder(i, '__luci__:csystem:' + clusterName)
 			new_node = cluster_node.restrictedTraverse(i)
 			setNodeFlag(self, new_node, new_flags)
-			messages.append('A new node, \"' + i + ',\" is now a member of cluster \"' + clusterName + '.\" It has added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.')
-		except:
-			messages.append('A new node, \"' + i + ',\" is now a member of cluster \"' + clusterName + ',\". but has not added to the management interface for this cluster as a result of an error creating the database entry.')
+			messages.append('A new cluster node, \"%s,\" is now a member of cluster \"%s.\" It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.' % (i, clusterName))
+		except Exception, e:
+			messages.append('A new cluster node, \"%s,\" is now a member of cluster \"%s,\" but it has not been added to the management interface for this cluster as a result of an error creating a database entry for it.' % (i, clusterName))
+			luci_log.debug_verbose('VCC5: addFolder: %s/%s: %s' \
+				% (clusterName, i, str(e)))
 	
 	return messages
 
@@ -4465,7 +4480,7 @@
 		clusterfolder = self.restrictedTraverse(path)
 		objs = clusterfolder.objectItems('Folder')
 	except Exception, e:
-		luci_log.info('resolve_nodename failed for %s/%s: %s' \
+		luci_log.info('RNN0: error for %s/%s: %s' \
 			% (nodename, clustername, str(e)))
 		return nodename
 
@@ -4476,14 +4491,15 @@
 		except:
 			continue
 
-	luci_log.info('resolve_nodename failed for %s/%s' % (nodename, clustername))
+	luci_log.info('RNN1: failed for %s/%s: nothing found' \
+		% (nodename, clustername))
 	return nodename
 
 def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
 	try:
 		items = nodefolder.objectItems('ManagedSystem')
 	except:
-		luci_log.debug('An error occurred while trying to list flags for cluster ' + nodefolder[0])
+		luci_log.debug('NNFP0: error getting flags for %s' % nodefolder[0])
 		return None
 
 	for item in items:
@@ -4494,8 +4510,8 @@
 		try:
 			# hostname must be a FQDN
 			rc = RicciCommunicator(hostname)
-		except RicciError, e:
-			luci_log.info('Unable to connect to the ricci daemon: %s' % str(e))
+		except Exception, e:
+			luci_log.info('NNFP1: ricci error %s: %s' % (hostname, str(e)))
 			return None
 
 		if not rc.authed():
@@ -4504,15 +4520,14 @@
 				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
 			except:
 				pass
-			luci_log.info('Node %s is not authenticated' % item[0])
-			return None
+			luci_log.info('NNFP2: %s not authenticated' % item[0])
 
 		finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
 		if finished == True:
 			try:
 				nodefolder.manage_delObjects(item[0])
 			except Exception, e:
-				luci_log.info('manage_delObjects for %s failed: %s' \
+				luci_log.info('NNFP3: manage_delObjects for %s failed: %s' \
 					% (item[0], str(e)))
 				return None
 			return True
@@ -4577,7 +4592,7 @@
 		flag.manage_addProperty(TASKTYPE, task, 'string')
 		flag.manage_addProperty(FLAG_DESC, desc, 'string')
 	except Exception, e:
-		errmsg = 'Error creating flag (%s,%s,%s) at %s: %s' \
+		errmsg = 'SNF0: error creating flag (%s,%s,%s)@%s: %s' \
 					% (batch_id, task, desc, objpath, str(e))
 		luci_log.debug_verbose(errmsg)
 		raise Exception, errmsg
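
One detail this diff leans on is the same_host comparison visible in the
second hunk: two names match when they are equal or when one is a dotted
prefix of the other, so short node names and FQDNs compare equal while
'node1' cannot accidentally match 'node10'. For example:

    same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'

    assert same_host('node1', 'node1')
    assert same_host('node1', 'node1.example.com')       # short name vs FQDN
    assert same_host('node1.example.com', 'node1')       # symmetric
    assert not same_host('node1', 'node10.example.com')  # dot stops prefix collisions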



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-10 17:59 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-10 17:59 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-10 17:59:58

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	cleanups for setting busy flags

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.158&r2=1.159

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/09 22:30:39	1.158
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/10 17:59:58	1.159
@@ -236,24 +236,33 @@
 	response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
 
 def buildClusterCreateFlags(self, batch_map, clusterName):
-  path = str(CLUSTER_FOLDER_PATH + clusterName)
-  clusterfolder = self.restrictedTraverse(path)
-  for key in batch_map.keys():
-    key = str(key)
-    batch_id = batch_map[key]
-    batch_id = str(batch_id)
-    objname = str(key + "____flag") #This suffix needed to avoid name collision
-    clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-    #now designate this new object properly
-    objpath = str(path + "/" + objname)
-    flag = self.restrictedTraverse(objpath)
-    #flag[BATCH_ID] = batch_id
-    #flag[TASKTYPE] = CLUSTER_ADD
-    #flag[FLAG_DESC] = "Creating node " + key + " for cluster " + clusterName
-    flag.manage_addProperty(BATCH_ID, batch_id, "string")
-    flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
-    flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
-    flag.manage_addProperty(LAST_STATUS, 0, "int")
+	path = str(CLUSTER_FOLDER_PATH + clusterName)
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+	except Exception, e:
+		luci_log.debug_verbose('buildCCF0: no cluster folder at %s' % path)
+		return None
+
+	for key in batch_map.keys():
+		try:
+			key = str(key)
+			batch_id = str(batch_map[key])
+			#This suffix needed to avoid name collision
+			objname = str(key + "____flag")
+
+			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#now designate this new object properly
+			objpath = str(path + "/" + objname)
+			flag = self.restrictedTraverse(objpath)
+
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
+			flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
+			flag.manage_addProperty(LAST_STATUS, 0, "int")
+		except Exception, e:
+			luci_log.debug_verbose('buildCCF1: error creating flag for %s: %s' \
+				% (key, str(e)))
 
 def validateAddClusterNode(self, request):
 	errors = list()
@@ -1770,22 +1779,27 @@
 	return child_depth + 1
 
 def serviceStart(self, rc, req):
+	svcname = None
 	try:
 		svcname = req['servicename']
-	except KeyError, e:
+	except:
 		try:
 			svcname = req.form['servicename']
 		except:
-			luci_log.debug_verbose('serviceStart error: no service name')
-			return None
+			pass
 
+	if svcname is None:
+		luci_log.debug_verbose('serviceStart0: no service name')
+		return None
+
+	nodename = None
 	try:
 		nodename = req['nodename']
-	except KeyError, e:
+	except:
 		try:
 			nodename = req.form['nodename']
 		except:
-			nodename = None
+			pass
 
 	cluname = None
 	try:
@@ -1797,52 +1811,38 @@
 			pass
 
 	if cluname is None:
-		luci_log.debug_verbose('serviceStart error: %s no service name' \
+		luci_log.debug_verbose('serviceStart2: no cluster name for svc %s' \
 			% svcname)
 		return None
 
-	ricci_agent = rc.hostname()
-
 	batch_number, result = startService(rc, svcname, nodename)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('startService %s call failed' \
-			% svcname)
+		luci_log.debug_verbose('startService3: SS(%s,%s,%s) call failed' \
+			% (svcname, cluname, nodename))
 		return None
 
-	#Now we need to create a DB flag for this system.
-	path = str(CLUSTER_FOLDER_PATH + cluname)
-	batch_id = str(batch_number)
-	objname = str(ricci_agent + "____flag")
-
 	try:
-		clusterfolder = self.restrictedTraverse(path)
-		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = str(path + "/" + objname)
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batch_id, "string")
-		flag.manage_addProperty(TASKTYPE, SERVICE_START, "string")
-		flag.manage_addProperty(FLAG_DESC, "Starting service \'" + svcname + "\'", "string")
+		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_START, "Starting service \'%s\'" % svcname)
 	except Exception, e:
-		luci_log.debug_verbose('Error creating flag at %s: %s' % (objpath, str(e)))
+		luci_log.debug_verbose('startService4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
 
 	response = req.RESPONSE
 	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def serviceRestart(self, rc, req):
+	svcname = None
 	try:
 		svcname = req['servicename']
-	except KeyError, e:
+	except:
 		try:
 			svcname = req.form['servicename']
 		except:
-			luci_log.debug_verbose('no service name for serviceRestart')
-			return None
-	except:
-		luci_log.debug_verbose('no service name for serviceRestart')
+			pass
+
+	if svcname is None:
+		luci_log.debug_verbose('serviceRestart0: no service name')
 		return None
 
-	#Now we need to create a DB flag for this system.
 	cluname = None
 	try:
 		cluname = req['clustername']
@@ -1853,51 +1853,36 @@
 			pass
 
 	if cluname is None:
-		luci_log.debug_verbose('unable to determine cluser name for serviceRestart %s' % svcname)
+		luci_log.debug_verbose('serviceRestart1: no cluster for %s' % svcname)
 		return None
 
 	batch_number, result = restartService(rc, svcname)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('restartService for %s failed' % svcname)
+		luci_log.debug_verbose('serviceRestart2: %s failed' % svcname)
 		return None
 				
-	ricci_agent = rc.hostname()
-
-	path = str(CLUSTER_FOLDER_PATH + cluname)
-	batch_id = str(batch_number)
-	objname = str(ricci_agent + "____flag")
-
 	try:
-		clusterfolder = self.restrictedTraverse(path)
-		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-
-		#Now we need to annotate the new DB object
-		objpath = str(path + "/" + objname)
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batch_id, "string")
-		flag.manage_addProperty(TASKTYPE, SERVICE_RESTART, "string")
-		flag.manage_addProperty(FLAG_DESC, "Restarting service " + svcname, "string")
+		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_RESTART, "Restarting service \'%s\'" % svcname)
 	except Exception, e:
-		luci_log.debug_verbose('Error creating flag in restartService %s: %s' \
-			% (svcname, str(e)))
+		luci_log.debug_verbose('serviceRestart3: error setting flags for service %s for cluster %s' % (svcname, cluname))
 
 	response = req.RESPONSE
 	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def serviceStop(self, rc, req):
+	svcname = None
 	try:
 		svcname = req['servicename']
-	except KeyError, e:
+	except:
 		try:
 			svcname = req.form['servicename']
 		except:
-			luci_log.debug_verbose('no service name for serviceStop')
-			return None
-	except:
-		luci_log.debug_verbose('no service name for serviceStop')
+			pass
+
+	if svcname is None:
+		luci_log.debug_verbose('serviceStop0: no service name')
 		return None
 
-	#Now we need to create a DB flag for this system.
 	cluname = None
 	try:
 		cluname = req['clustername']
@@ -1908,34 +1893,18 @@
 			pass
 
 	if cluname is None:
-		luci_log.debug_verbose('unable to determine cluser name for serviceStop %s' % svcname)
+		luci_log.debug_verbose('serviceStop1: no cluster name for %s' % svcname)
 		return None
 
 	batch_number, result = stopService(rc, svcname)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('stopService for %s failed' % svcname)
+		luci_log.debug_verbose('serviceStop2: stop %s failed' % svcname)
 		return None
 
-	ricci_agent = rc.hostname()
-
-	path = str(CLUSTER_FOLDER_PATH + cluname)
-	batch_id = str(batch_number)
-	objname = str(ricci_agent + "____flag")
-
 	try:
-		clusterfolder = self.restrictedTraverse(path)
-		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = str(path + "/" + objname)
-		flag = self.restrictedTraverse(objpath)
-
-		flag.manage_addProperty(BATCH_ID, batch_id, "string")
-		flag.manage_addProperty(TASKTYPE, SERVICE_STOP, "string")
-		flag.manage_addProperty(FLAG_DESC, "Stopping service " + svcname, "string")
-		time.sleep(2)
+		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_STOP, "Stopping service \'%s\'" % svcname)
 	except Exception, e:
-		luci_log.debug_verbose('Error creating flags for stopService %s: %s' \
-			% (svcname, str(e)))
+		luci_log.debug_verbose('serviceStop3: error setting flags for service %s for cluster %s' % (svcname, cluname))
 
 	response = req.RESPONSE
 	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
@@ -2301,7 +2270,7 @@
 		return None
 
 	try:
-		set_node_flag(self, clustername, rc.hostname(), batch_number, NODE_LEAVE_CLUSTER, "Node \'%s\' leaving cluster" % nodename_resolved)
+		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_LEAVE_CLUSTER, "Node \'%s\' leaving cluster" % nodename_resolved)
 	except Exception, e:
 		luci_log.debug_verbose('NL4: failed to set flags: %s' % str(e))
 	return True
@@ -2313,7 +2282,7 @@
 		return None
 
 	try:
-		set_node_flag(self, clustername, rc.hostname(), batch_number, NODE_JOIN_CLUSTER, "Node \'%s\' joining cluster" % nodename_resolved)
+		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_JOIN_CLUSTER, "Node \'%s\' joining cluster" % nodename_resolved)
 	except Exception, e:
 		luci_log.debug_verbose('NJ1: failed to set flags: %s' % str(e))
 	return True
@@ -2391,7 +2360,7 @@
 		return None
 
 	try:
-		set_node_flag(self, clustername, rc.hostname(), batch_number, NODE_REBOOT, "Node \'%s\' is being rebooted" % nodename_resolved)
+		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_REBOOT, "Node \'%s\' is being rebooted" % nodename_resolved)
 	except Exception, e:
 		luci_log.debug_verbose('FNR1: failed to set flags: %s' % str(e))
 	return True
@@ -2457,7 +2426,7 @@
 		return None
 
 	try:
-		set_node_flag(self, clustername, rc.hostname(), batch_number, NODE_FENCE, "Node \'%s\' is being fenced" % nodename_resolved)
+		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_FENCE, "Node \'%s\' is being fenced" % nodename_resolved)
 	except Exception, e:
 		luci_log.debug_verbose('FNF4: failed to set flags: %s' % str(e))
 	return True
@@ -2577,7 +2546,7 @@
 			% (del_path, str(e)))
 
 	try:
-		set_node_flag(self, clustername, rc2.hostname(), batch_number, NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
+		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
 	except Exception, e:
 		luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
 	return True
@@ -3686,41 +3655,47 @@
 				continue
 
 def delResource(self, rc, request):
-	errstr = 'An error occurred in while attempting to set the cluster.conf'
+	errstr = 'An error occurred while attempting to set the new cluster.conf'
 
 	try:
 		modelb = request.SESSION.get('model')
-	except:
-		luci_log.debug_verbose('delRes unable to extract model from SESSION')
+	except Exception, e:
+		luci_log.debug_verbose('delResource0: no model: %s' % str(e))
 		return errstr
 
+	name = None
 	try:
 		name = request['resourcename']
-	except KeyError, e:
+	except:
 		try:
 			name = request.form['resourcename']
 		except:
-			luci_log.debug_verbose('delRes missing resname %s' % str(e))
-			return errstr + ': ' + str(e)
-	except:
-		luci_log.debug_verbose('delRes missing resname')
-		return errstr + ': ' + str(e)
+			pass
+
+	if name is None:
+		luci_log.debug_verbose('delResource1: no resource name')
+		return errstr + ': no resource name was provided.'
 
+	clustername = None
 	try:
 		clustername = request['clustername']
-	except KeyError, e:
+	except:
 		try:
 			clustername = request.form['clustername']
 		except:
-			luci_log.debug_verbose('delRes missing cluster name')
-			return errstr + ': could not determine the cluster name.'
+			pass
+
+	if clustername is None:
+		luci_log.debug_verbose('delResource2: no cluster name for %s' % name)
+		return errstr + ': could not determine the cluster name.'
 
 	try:
 		ragent = rc.hostname()
 		if not ragent:
-			raise
-	except:
-		return errstr
+			raise Exception, 'unable to determine the hostname of the ricci agent'
+	except Exception, e:
+		luci_log.debug_verbose('delResource3: %s: %s' % (errstr, str(e)))
+		return errstr + ': could not determine the ricci agent hostname'
 
 	resPtr = modelb.getResourcesPtr()
 	resources = resPtr.getChildren()
@@ -3733,7 +3708,7 @@
 			break
 
 	if not found:
-		luci_log.debug_verbose('delRes cant find res %s' % name)
+		luci_log.debug_verbose('delResource4: cant find res %s' % name)
 		return errstr + ': the specified resource was not found.'
 
 	try:
@@ -3741,32 +3716,19 @@
 		if not conf:
 			raise Exception, 'model string is blank'
 	except Exception, e:
-		luci_log.debug_verbose('delRes: exportModelAsString failed: %s' % str(e))
+		luci_log.debug_verbose('delResource5: exportModelAsString failed: %s' \
+			% str(e))
 		return errstr
 
 	batch_number, result = setClusterConf(rc, str(conf))
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('delRes: missing batch and/or result from setClusterConf')
+		luci_log.debug_verbose('delResource6: missing batch and/or result')
 		return errstr
 
-	path = CLUSTER_FOLDER_PATH + str(clustername)
-	clusterfolder = self.restrictedTraverse(path)
-	batch_id = str(batch_number)
-	objname = str(ragent) + '____flag'
-	objpath = str(path + '/' + objname)
-
 	try:
-		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batch_id, "string")
-		flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
-		flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
+		set_node_flag(self, clustername, ragent, str(batch_number), RESOURCE_REMOVE, "Removing Resource \'%s\'" % request['resourcename'])
 	except Exception, e:
-		luci_log.debug('delRes: An error occurred while setting flag %s: %s' \
-			% (objname, str(e)))
-	except:
-		luci_log.debug('delRes: An error occurred while setting flag %s' % objname)
+		luci_log.debug_verbose('delResource7: failed to set flags: %s' % str(e))
 
 	response = request.RESPONSE
 	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
@@ -4427,57 +4389,52 @@
 def addResource(self, request, modelb, res, res_type):
 	clustername = modelb.getClusterName()
 	if not clustername:
-		raise Exception, 'cluster name from modelb.getClusterName() is blank'
+		luci_log.debug_verbose('addResource0: no cluname from mb')
+		return 'Unable to determine cluster name'
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
-		raise Exception, 'Unable to find a ricci agent for the %s cluster' % clustername
+		luci_log.debug_verbose('addResource1: unable to find a ricci agent for cluster %s' % clustername)
+		return 'Unable to find a ricci agent for the %s cluster' % clustername
 
-	modelb.getResourcesPtr().addChild(res)
+	try:
+		modelb.getResourcesPtr().addChild(res)
+	except Exception, e:
+		luci_log.debug_verbose('addResource2: adding the new resource failed: %s' % str(e))
+		return 'Unable to add the new resource'
 
 	try:
 		conf = modelb.exportModelAsString()
 		if not conf:
 			raise Exception, 'model string for %s is blank' % clustername
 	except Exception, e:
-		luci_log.debug_verbose('addResource: exportModelAsString err: %s' % str(e))
+		luci_log.debug_verbose('addResource3: exportModelAsString : %s' \
+			% str(e))
 		return 'An error occurred while adding this resource'
 
 	try:
 		ragent = rc.hostname()
 		if not ragent:
-			luci_log.debug_verbose('missing hostname')
+			luci_log.debug_verbose('addResource4: missing ricci hostname')
 			raise Exception, 'unknown ricci agent hostname'
-		luci_log.debug_verbose('SENDING NEW CLUSTER CONF: %s' % conf)
+
 		batch_number, result = setClusterConf(rc, str(conf))
 		if batch_number is None or result is None:
-			luci_log.debug_verbose('missing batch_number or result')
-			raise Exception, 'batch_number or results is None from setClusterConf'
+			luci_log.debug_verbose('addResource5: missing batch_number or result')
+			raise Exception, 'unable to save the new cluster configuration.'
 	except Exception, e:
+		luci_log.debug_verbose('addResource6: %s' % str(e))
 		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
 
-	path = str(CLUSTER_FOLDER_PATH + clustername)
-	clusterfolder = self.restrictedTraverse(path)
-	batch_id = str(batch_number)
-	objname = str(ragent + '____flag')
-	objpath = str(path + '/' + objname)
+	if res_type != 'ip':
+		res_name = res.attr_hash['name']
+	else:
+		res_name = res.attr_hash['address']
 
 	try:
-		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batch_id, "string")
-		flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
-
-		if res_type != 'ip':
-			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['name'] + "\'", "string")
-		else:
-			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+		set_node_flag(self, clustername, ragent, str(batch_number), RESOURCE_ADD, "Creating New Resource \'%s\'" % res_name)
 	except Exception, e:
-		try:
-			luci_log.info('Unable to create flag %s: %s' % (objpath, str(e)))
-		except:
-			pass
+		luci_log.debug_verbose('addResource7: failed to set flags: %s' % str(e))
 
 	response = request.RESPONSE
 	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
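
All of the inline flag-creation blocks deleted above are folded into
set_node_flag(), whose body is not shown in these hunks. Reconstructed from
the inline code it replaces (and from the SNF0 error path in the previous
mail), it plausibly amounts to:

    def set_node_flag(self, cluname, agent, batch_id, task, desc):
        # Reconstruction, not the actual helper: create the agent's
        # '____flag' ManagedSystem object under the cluster folder and
        # annotate it with the batch id, task type and description.
        path = str(CLUSTER_FOLDER_PATH + cluname)
        objname = str(agent + '____flag')
        objpath = str(path + '/' + objname)
        clusterfolder = self.restrictedTraverse(path)
        clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
        flag = self.restrictedTraverse(objpath)
        flag.manage_addProperty(BATCH_ID, batch_id, 'string')
        flag.manage_addProperty(TASKTYPE, task, 'string')
        flag.manage_addProperty(FLAG_DESC, desc, 'string')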



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-09 22:30 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-09 22:30 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-09 22:30:39

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix for a bug in the busy wait that's hit when a node we're waiting for becomes unreachable

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.157&r2=1.158

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/09 20:32:02	1.157
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/09 22:30:39	1.158
@@ -3420,20 +3420,17 @@
         rc = RicciCommunicator(ricci[0])
         if not rc:
           rc = None
-          raise RicciError, 'rc is None for %s' % ricci[0]
-      except RicciError, e:
+          luci_log.debug_verbose('ICB6b: rc is none')
+      except Exception, e:
         rc = None
         luci_log.debug_verbose('ICB7: ricci returned error in iCB for %s: %s' \
           % (cluname, str(e)))
-      except:
-        rc = None
-        luci_log.info('ICB8: ricci connection failed for cluster %s' % cluname)
 
       batch_id = None
       if rc is not None:
         try:
           batch_id = item[1].getProperty(BATCH_ID)
-          luci_log.debug_verbose('ICB8A: got batch_id %s from %s' \
+          luci_log.debug_verbose('ICB8: got batch_id %s from %s' \
               % (batch_id, item[0]))
         except Exception, e:
           try:
@@ -3545,8 +3542,18 @@
       node_report = {}
       node_report['isnodecreation'] = False
       ricci = item[0].split("____") #This removes the 'flag' suffix
-      rc = RicciCommunicator(ricci[0])
-      finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
+
+      try:
+        rc = RicciCommunicator(ricci[0])
+      except Exception, e:
+        rc = None
+        finished = False
+        luci_log.debug_verbose('ICB15: ricci error: %s: %s' \
+          % (ricci[0], str(e)))
+
+      if rc is not None:
+        finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
+
       if finished == True:
         flag_desc = item[1].getProperty(FLAG_DESC)
         if flag_desc is None:
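
Condensed, the shape of the fix: constructing a RicciCommunicator for an
unreachable node must not raise out of the busy-wait loop, and a dead agent
counts as "batch not finished". Initializing finished before the try keeps
the later test well-defined on the error path (names from the hunk; the
surrounding loop is assumed):

    finished = False
    try:
        rc = RicciCommunicator(ricci[0])
    except Exception, e:
        rc = None
        luci_log.debug_verbose('ricci error: %s: %s' % (ricci[0], str(e)))

    if rc is not None:
        finished = checkBatch(rc, item[1].getProperty(BATCH_ID))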



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-09 14:17 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-09 14:17 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-09 14:17:09

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix a fencing bug

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.155&r2=1.156

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/08 21:42:50	1.155
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/09 14:17:08	1.156
@@ -3021,7 +3021,7 @@
       if fd.isShared() == False:
         continue
       for fdev in level1:
-        if fd.getName.strip() == fdev['name']:
+        if fd.getName().strip() == fdev['name']:
           isUnique = False
           break
       if isUnique == True:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-08 21:42 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-08 21:42 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-08 21:42:50

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	- fix a couple of typos in the fence device code
	- redirect to the busy page when modifying cluster params and deploying a new cluster

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.154&r2=1.155

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/08 15:52:41	1.154
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/08 21:42:50	1.155
@@ -233,8 +233,8 @@
 				return (False, {'errors': errors, 'requestResults':cluster_properties })
 		buildClusterCreateFlags(self, batch_id_map, clusterName)
 
-	messages.append('Creation of cluster \"' + clusterName + '\" has begun')
-	return (True, {'errors': errors, 'messages': messages })
+	response = request.RESPONSE
+	response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName)
 
 def buildClusterCreateFlags(self, batch_map, clusterName):
   path = str(CLUSTER_FOLDER_PATH + clusterName)
@@ -849,8 +849,11 @@
 
 	if len(errors) < 1:
 		messages.append('The cluster properties have been updated.')
+	else:
+		return (retcode, {'errors': errors, 'messages': messages})
 
-	return (retcode, {'errors': errors, 'messages': messages})
+	response = request.RESPONSE
+	response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 
 def validateFenceAdd(self, request):
 	return (True, {})
@@ -1260,19 +1263,6 @@
   kids.append(rvcfg)
   rv['children'] = kids
  ################################################################
-
-  cprop = {}
-  cprop['Title'] = 'Configure'
-  cprop['cfg_type'] = 'configuration paramters'
-  cprop['absolute_url'] = url + '?pagetype=' + CLUSTER_CONFIG + '&clustername=' + cluname
-  cprop['Description'] = 'Change cluster configuration parameters'
-  cprop['show_children'] = False
-  if pagetype == CLUSTER_CONFIG:
-    cprop['currentItem'] = True
-  else:
-    cprop['currentItem'] = False
-
- #################################################################
   fd = {}
   fd['Title'] = "Failover Domains"
   fd['cfg_type'] = "failoverdomains"
@@ -1416,7 +1406,6 @@
   mylist.append(nd)
   mylist.append(sv)
   mylist.append(rv)
-  mylist.append(cprop)
   mylist.append(fd)
   mylist.append(fen)
 
@@ -2892,6 +2881,8 @@
                   found_duplicate = True
               if found_duplicate == True:
                 continue
+              baseurl = request['URL']
+              clustername = model.getClusterName()
               node_hash = {}
               node_hash['nodename'] = node.getName().strip()
               node_hash['nodeurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
@@ -2963,7 +2954,7 @@
       if fd != None:
         if fd.isShared() == False:  #Not a shared dev...build struct and add
           fencedev = {}
-          fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+          fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
           fencedev['isShared'] = False
           fencedev['id'] = str(major_num)
           major_num = major_num + 1
@@ -3000,7 +2991,7 @@
             continue
           else: #Shared, but not used above...so we need a new fencedev struct
             fencedev = {}
-            fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+            fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
             fencedev['isShared'] = True
             fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV 
             fencedev['id'] = str(major_num)
@@ -3029,7 +3020,7 @@
       isUnique = True
       if fd.isShared() == False:
         continue
-      for fdev in level1
+      for fdev in level1:
         if fd.getName.strip() == fdev['name']:
           isUnique = False
           break
@@ -3038,7 +3029,7 @@
         shared_struct['name'] = fd.getName().strip()
         agentname = fd.getAgentType()
         shared_struct['agent'] = agentname
-        shared_struct['prettyname'] = FenceHandler.FENCE_OPTS[agentname]
+        shared_struct['prettyname'] = FENCE_OPTS[agentname]
         shared1.append(shared_struct)
 
   #YUK: This next section violates the DRY rule, :-(
@@ -3057,7 +3048,7 @@
       if fd != None:
         if fd.isShared() == False:  #Not a shared dev...build struct and add
           fencedev = {}
-          fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+          fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
           fencedev['isShared'] = False
           fencedev['id'] = str(major_num)
           major_num = major_num + 1
@@ -3094,7 +3085,7 @@
             continue
           else: #Shared, but not used above...so we need a new fencedev struct
             fencedev = {}
-            fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+            fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
             fencedev['isShared'] = True
             fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV 
             fencedev['id'] = str(major_num)
@@ -3123,7 +3114,7 @@
       isUnique = True
       if fd.isShared() == False:
         continue
-      for fdev in level2
+      for fdev in level2:
         if fd.getName.strip() == fdev['name']:
           isUnique = False
           break
@@ -3132,7 +3123,7 @@
         shared_struct['name'] = fd.getName().strip()
         agentname = fd.getAgentType()
         shared_struct['agent'] = agentname
-        shared_struct['prettyname'] = FenceHandler.FENCE_OPTS[agentname]
+        shared_struct['prettyname'] = FENCE_OPTS[agentname]
         shared2.append(shared_struct)
 
   return map    
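
The FENCE_OPTS corrections in this patch are an import-binding fix rather
than a data change: cluster_adapters imports the table directly ("from
FenceHandler import FenceHandler, FENCE_OPTS", per the 2006-11-27 mail
above), which implies FENCE_OPTS is a module-level name, not a class
attribute. A toy reproduction of the trap (the stand-in class is
hypothetical):

    FENCE_OPTS = {'fence_apc': 'APC Power Switch'}   # module-level table

    class FenceHandler:
        pass   # stand-in; the real class lives in FenceHandler.py

    print FENCE_OPTS['fence_apc']      # fine: module-level name
    try:
        print FenceHandler.FENCE_OPTS  # fails: not a class attribute
    except AttributeError, e:
        print 'bug reproduced: %s' % str(e)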



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-08 15:52 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-08 15:52 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-11-08 15:52:41

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Finish task of separating fence info methods so that conditional arg need not be passed. Also some fixes

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.153&r2=1.154

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 21:33:52	1.153
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/08 15:52:41	1.154
@@ -2867,7 +2867,7 @@
 
 def getFence(self, model, request):
   map = {}
-  fencename = request['fencedevicename']
+  fencename = request['fencename']
   fencedevs = model.getFenceDevices()
   for fencedev in fencedevs:
     if fencedev.getName().strip() == fencename:
@@ -2877,9 +2877,37 @@
       except:
         map['pretty_name'] = fencedev.getAgentType()
 
+      nodes_used = list()
+      nodes = model.getNodes()
+      for node in nodes:
+        flevels = node.getFenceLevels()
+        for flevel in flevels: #These are the method blocks...
+          kids = flevel.getChildren()
+          for kid in kids: #These are actual devices in each level
+            if kid.getName().strip() == fencedev.getName().strip():
+              #See if this fd already has an entry for this node
+              found_duplicate = False
+              for item in nodes_used:
+                if item['nodename'] == node.getName().strip():
+                  found_duplicate = True
+              if found_duplicate == True:
+                continue
+              node_hash = {}
+              node_hash['nodename'] = node.getName().strip()
+              node_hash['nodeurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
+              nodes_used.append(node_hash)
+
+      map['nodesused'] = nodes_used
       return map
 
   return map
+
+def getFDForInstance(fds, name):
+  for fd in fds:
+    if fd.getName().strip() == name:
+      return fd
+
+  raise
   
 def getFenceInfo(self, model, request):
   clustername = request['clustername']
@@ -2894,7 +2922,8 @@
   map['shared1'] = shared1
   map['shared2'] = shared2
 
-  pass
+  major_num = 1
+  minor_num = 100
 
   try:
     nodename = request['nodename']
@@ -2911,70 +2940,200 @@
   except GeneralError, e:
     raise GeneralError('FATAL', "Couldn't find node name in current node list")
 
+  fds = model.getFenceDevices()
+
   levels = node.getFenceLevels()
   len_levels = len(levels)
 
   if len_levels == 0:
     return map
 
-  for i in xrange(2):
-    if not i in levels:
-      continue
-    fence_struct = {}
-    if levels[i] != None:
-      level = levels[i]
-    else:
-      #No more levels...
-      continue
-    kids = level.getChildren()
-    if len(kids) == 0:
-      continue
-    else:
-      #for each kid, 
-      ### resolve name, find fence device
-      ### Add fd to list, if it is not there yet 
-      ### determine if it is a shared fence type
-      ### if it is a shared device, add instance entry
-      fds = model.getFenceDevices()
-      fence_struct = None
-      for kid in kids:
-        name = kid.getName()
-        found_fd = False
-        if not i in map:
-          continue
-        for entry in map[i]:
-          if entry['name'] == name:
-            fence_struct = entry
-            found_fd = True
-            break
-        if found_fd == False:
-          for fd in fds:
-            if fd.getName() == name:  #Found the fence device
-              fence_struct = {}
-              fence_struct['isShareable'] = fd.isShared()
-              fd_attrs = fd.getAttributes()
-              kees = fd_attrs.keys()
-              for kee in kees:
-                fence_struct[kee] = fd_attrs[kee]
-        fi_attrs = kid.getAttributes()
-        kees = fi_attrs.keys()
-        if fence_struct['isShareable'] == True:
-          instance_struct = {}
+  if len_levels >= 1:
+    first_level = levels[0]
+    kids = first_level.getChildren()
+    last_kid_fd = None  #This is a marker for allowing multi instances
+                        #beneath a fencedev
+    for kid in kids:
+      instance_name = kid.getName().strip()
+      try:
+        fd = getFDForInstance(fds, instance_name)
+      except:
+        fd = None #Set to None in case last time thru loop
+        continue
+      if fd != None:
+        if fd.isShared() == False:  #Not a shared dev...build struct and add
+          fencedev = {}
+          fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+          fencedev['isShared'] = False
+          fencedev['id'] = str(major_num)
+          major_num = major_num + 1
+          devattrs = fd.getAttributes()
+          kees = devattrs.keys()
           for kee in kees:
-            instance_struct[kee] = fi_attrs[kee]
-            try:
-                check = fence_struct['instances']
-                check.append(instance_struct)
-            except KeyError, e:
-                fence_struct['instances'] = list()
-                fence_struct['instances'].append(instance_struct) 
-        else:  #Not a shareable fence device type
+            fencedev[kee] = devattrs[kee]
+          kidattrs = kid.getAttributes()
+          kees = kidattrs.keys()
           for kee in kees:
-            fence_struct[kee] = fi_attrs[kee]
-      if i == 0:
-        level1.append(fence_struct)      
-      else:
-        level2.append(fence_struct)      
+            if kee == "name":
+              continue #Don't duplicate name attr
+            fencedev[kee] = kidattrs[kee]
+          #This fencedev struct is complete, and needs to be placed on the 
+          #level1 Q. Because it is non-shared, we should set last_kid_fd
+          #to none.
+          last_kid_fd = None
+          level1.append(fencedev)
+        else:  #This dev is shared
+          if (last_kid_fd != None) and (fd.getName().strip() == last_kid_fd.getName().strip()):  #just append a new instance struct to last_kid_fd
+            instance_struct = {}
+            instance_struct['id'] = str(minor_num)
+            minor_num = minor_num + 1
+            kidattrs = kid.getAttributes()
+            kees = kidattrs.keys()
+            for kee in kees:
+              if kee == "name":
+                continue
+              instance_struct[kee] = kidattrs[kee]
+            #Now just add this struct to last_kid_fd and reset last_kid_fd
+            ilist = last_kid_fd['instance_list']
+            ilist.append(instance_struct)
+            last_kid_fd = fd
+            continue
+          else: #Shared, but not used above...so we need a new fencedev struct
+            fencedev = {}
+            fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+            fencedev['isShared'] = True
+            fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV 
+            fencedev['id'] = str(major_num)
+            major_num = major_num + 1
+            inlist = list()
+            fencedev['instance_list'] = inlist
+            devattrs = fd.getAttributes()
+            kees = devattrs.keys()
+            for kee in kees:
+              fencedev[kee] = devattrs[kee]
+            instance_struct = {}
+            kidattrs = kid.getAttributes()
+            kees = kidattrs.keys()
+            for kee in kees:
+              if kee == "name":
+                continue
+              instance_struct[kee] = kidattrs[kee]
+            inlist.append(instance_struct) 
+            level1.append(fencedev)
+            last_kid_fd = fd
+            continue
+
+    #level1 list is complete now, but it is still necessary to build shared1
+    sharednames = list()
+    for fd in fds:
+      isUnique = True
+      if fd.isShared() == False:
+        continue
+      for fdev in level1:
+        if fd.getName().strip() == fdev['name']:
+          isUnique = False
+          break
+      if isUnique == True:
+        shared_struct = {}
+        shared_struct['name'] = fd.getName().strip()
+        agentname = fd.getAgentType()
+        shared_struct['agent'] = agentname
+        shared_struct['prettyname'] = FenceHandler.FENCE_OPTS[agentname]
+        shared1.append(shared_struct)
+
+  #YUK: This next section violates the DRY rule, :-(
+  if len_levels >= 2:
+    second_level = levels[1]
+    kids = second_level.getChildren()
+    last_kid_fd = None  #This is a marker for allowing multi instances
+                        #beneath a fencedev
+    for kid in kids:
+      instance_name = kid.getName().strip()
+      try:
+        fd = getFDForInstance(fds, instance_name)
+      except:
+        fd = None #Set to None in case last time thru loop
+        continue
+      if fd != None:
+        if fd.isShared() == False:  #Not a shared dev...build struct and add
+          fencedev = {}
+          fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+          fencedev['isShared'] = False
+          fencedev['id'] = str(major_num)
+          major_num = major_num + 1
+          devattrs = fd.getAttributes()
+          kees = devattrs.keys()
+          for kee in kees:
+            fencedev[kee] = devattrs[kee]
+          kidattrs = kid.getAttributes()
+          kees = kidattrs.keys()
+          for kee in kees:
+            if kee == "name":
+              continue #Don't duplicate name attr
+            fencedev[kee] = kidattrs[kee]
+          #This fencedev struct is complete, and needs to be placed on the 
+          #level2 Q. Because it is non-shared, we should set last_kid_fd
+          #to none.
+          last_kid_fd = None
+          level2.append(fencedev)
+        else:  #This dev is shared
+          if (last_kid_fd != None) and (fd.getName().strip() == last_kid_fd.getName().strip()):  #just append a new instance struct to last_kid_fd
+            instance_struct = {}
+            instance_struct['id'] = str(minor_num)
+            minor_num = minor_num + 1
+            kidattrs = kid.getAttributes()
+            kees = kidattrs.keys()
+            for kee in kees:
+              if kee == "name":
+                continue
+              instance_struct[kee] = kidattrs[kee]
+            #Now just add this struct to last_kid_fd and reset last_kid_fd
+            ilist = last_kid_fd['instance_list']
+            ilist.append(instance_struct)
+            last_kid_fd = fd
+            continue
+          else: #Shared, but not used above...so we need a new fencedev struct
+            fencedev = {}
+            fencedev['prettyname'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+            fencedev['isShared'] = True
+            fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV 
+            fencedev['id'] = str(major_num)
+            major_num = major_num + 1
+            inlist = list()
+            fencedev['instance_list'] = inlist
+            devattrs = fd.getAttributes()
+            kees = devattrs.keys()
+            for kee in kees:
+              fencedev[kee] = devattrs[kee]
+            instance_struct = {}
+            kidattrs = kid.getAttributes()
+            kees = kidattrs.keys()
+            for kee in kees:
+              if kee == "name":
+                continue
+              instance_struct[kee] = kidattrs[kee]
+            inlist.append(instance_struct) 
+            level2.append(fencedev)
+            last_kid_fd = fd
+            continue
+
+    #level2 list is complete but like above, we need to build shared2
+    sharednames = list()
+    for fd in fds:
+      isUnique = True
+      if fd.isShared() == False:
+        continue
+      for fdev in level2:
+        if fd.getName().strip() == fdev['name']:
+          isUnique = False
+          break
+      if isUnique == True:
+        shared_struct = {}
+        shared_struct['name'] = fd.getName().strip()
+        agentname = fd.getAgentType()
+        shared_struct['agent'] = agentname
+        shared_struct['prettyname'] = FenceHandler.FENCE_OPTS[agentname]
+        shared2.append(shared_struct)
 
   return map    
       
@@ -2999,6 +3158,8 @@
         fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
       except:
         fencedev['pretty_name'] = fd.getAgentType()
+      #Add config url for this fencedev
+      fencedev['cfgurl'] = baseurl + "?clustername=" + clustername + "&fencename=" + fd.getName().strip() + "&pagetype=" + FENCEDEV
 
       nodes = model.getNodes()
       for node in nodes:
@@ -3016,7 +3177,7 @@
                 continue
               node_hash = {}
               node_hash['nodename'] = node.getName().strip()
-              node_hash['nodeurl'] = baseurl + "#fence" + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
+              node_hash['nodeurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
               nodes_used.append(node_hash)
 
       fencedev['nodesused'] = nodes_used
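
The rewritten loop above hinges on one small piece of state: last_kid_fd remembers the shared device used by the previous instance, so consecutive instances of the same shared device collapse into a single struct. A minimal sketch of that grouping, assuming hypothetical (name, is_shared) tuples in place of the real FenceDevice objects (group_level is not a conga function):

def group_level(instances):
    level = []           # one struct per fence device, in method order
    last_shared = None   # struct of the most recently seen shared device
    for name, is_shared in instances:
        if not is_shared:
            level.append({'name': name})
            last_shared = None       # a non-shared device breaks the run
        elif last_shared is not None and last_shared['name'] == name:
            last_shared['instance_list'].append({})  # same shared dev: new instance
        else:
            fencedev = {'name': name, 'instance_list': [{}]}
            level.append(fencedev)
            last_shared = fencedev
    return level

# Two consecutive instances of shared device "apc1" collapse into one
# struct with two entries in its instance_list:
print(group_level([('apc1', True), ('apc1', True), ('ilo1', False)]))

Only consecutive instances are merged, matching the loop above: two uses of the same shared device separated by another device produce two separate structs.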



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-07 20:14 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-07 20:14 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-11-07 20:14:15

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	doh. remove debug lines

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.151&r2=1.152

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 20:13:17	1.151
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 20:14:15	1.152
@@ -2992,9 +2992,7 @@
         for flevel in flevels: #These are the method blocks...
           kids = flevel.getChildren()
           for kid in kids: #These are actual devices in each level
-            luci_log.debug_verbose('getFencesInfo: comparing >%s< and >%s<' % (kid.getName().strip(),fd.getName().strip()))
             if kid.getName().strip() == fd.getName().strip():
-              luci_log.debug_verbose('getFencesInfo: Found a match')
               #See if this fd already has an entry for this node
               found_duplicate = False
               for item in nodes_used:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-07 20:13 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-07 20:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-11-07 20:13:17

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	separation of fence info calls

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.150&r2=1.151

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 02:36:04	1.150
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 20:13:17	1.151
@@ -2867,11 +2867,10 @@
 
   return map
   
-def getFenceInfo(self, model, request, fornode = None):
+def getFenceInfo(self, model, request):
   clustername = request['clustername']
   baseurl = request['URL']
   map = {}
-  fencedevs = list() #This is for the fencedev list page
   level1 = list() #First level fence devices
   level2 = list() #Second level fence devices
   shared1 = list() #List of available sharable fence devs not used in level1
@@ -2880,132 +2879,90 @@
   map['level2'] = level2
   map['shared1'] = shared1
   map['shared2'] = shared2
-  map['fencedevs'] = fencedevs
-  nodename = ""
-  if fornode == None:  #this is being called by the fence device list page
-    #Get list of fence devices
-    fds = model.getFenceDevices()
-    for fd in fds:
-      #create fencedev hashmap
-      if fd.isShared() == True:
-        fencedev = {}
-        attr_hash = fd.getAttributes()
-        kees = attr_hash.keys()
-        for kee in kees:
-          fencedev[kee] = attr_hash[kee] #copy attrs over
-        try:
-          fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
-        except:
-          fencedev['pretty_name'] = fd.getAgentType()
-
-        nodes_used = list() #This section determines which nodes use the dev
-        nodes = model.getNodes()
-        for node in nodes:
-          flevels = node.getFenceLevels()
-          for flevel in flevels: #These are the method blocks...
-            kids = flevel.getChildren()
-            for kid in kids: #These are actual devices in each level
-              if kid.getName().strip == fd.getName().strip():
-                #See if this fd already has an entry for this node
-                found_duplicate = False
-                for item in nodes_used:
-                  if item['nodename'] == node.getName().strip():
-                    found_duplicate = True
-                if found_duplicate == True:
-                  continue
-                node_hash = {}
-                node_hash['nodename'] = node.getName().strip()
-                node_hash['nodeurl'] = baseurl + "#fence" + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
-                nodes_used.append(node_hash)
 
-        fencedev['nodesused'] = nodes_used
-      fencedevs.append(fencedev)
-      
-    return map
+  pass
 
-  else:
-    pass
-    try:
-      nodename = request['nodename']
-    except KeyError, e:
-      raise GeneralError('FATAL', "Could not extract nodename from request")
+  try:
+    nodename = request['nodename']
+  except KeyError, e:
+    raise GeneralError('FATAL', "Could not extract nodename from request")
     
-    #Here we need to get fences for a node - just the first two levels
-    #Each level has its own list of fence devs used in that level
-    #For each fence dev, a list of instance structs is appended
-    #In addition, for each level, a list of available but unused fence devs
-    #is returned. 
-    try:
-      node = model.retrieveNodeByName(nodename)
-    except GeneralError, e:
-      raise GeneralError('FATAL', "Couldn't find node name in current node list")
+  #Here we need to get fences for a node - just the first two levels
+  #Each level has its own list of fence devs used in that level
+  #For each fence dev, a list of instance structs is appended
+  #In addition, for each level, a list of available but unused fence devs
+  #is returned. 
+  try:
+    node = model.retrieveNodeByName(nodename)
+  except GeneralError, e:
+    raise GeneralError('FATAL', "Couldn't find node name in current node list")
 
-    levels = node.getFenceLevels()
-    len_levels = len(levels)
+  levels = node.getFenceLevels()
+  len_levels = len(levels)
 
-    if len_levels == 0:
-      return map
+  if len_levels == 0:
+    return map
 
-    for i in xrange(2):
-      if not i in levels:
-        continue
-      fence_struct = {}
-      if levels[i] != None:
-        level = levels[i]
-      else:
-        #No more levels...
-        continue
-      kids = level.getChildren()
-      if len(kids) == 0:
-        continue
+  for i in xrange(2):
+    if not i in levels:
+      continue
+    fence_struct = {}
+    if levels[i] != None:
+      level = levels[i]
+    else:
+      #No more levels...
+      continue
+    kids = level.getChildren()
+    if len(kids) == 0:
+      continue
+    else:
+      #for each kid, 
+      ### resolve name, find fence device
+      ### Add fd to list, if it is not there yet 
+      ### determine if it is a shared fence type
+      ### if it is a shared device, add instance entry
+      fds = model.getFenceDevices()
+      fence_struct = None
+      for kid in kids:
+        name = kid.getName()
+        found_fd = False
+        if not i in map:
+          continue
+        for entry in map[i]:
+          if entry['name'] == name:
+            fence_struct = entry
+            found_fd = True
+            break
+        if found_fd == False:
+          for fd in fds:
+            if fd.getName() == name:  #Found the fence device
+              fence_struct = {}
+              fence_struct['isShareable'] = fd.isShared()
+              fd_attrs = fd.getAttributes()
+              kees = fd_attrs.keys()
+              for kee in kees:
+                fence_struct[kee] = fd_attrs[kee]
+        fi_attrs = kid.getAttributes()
+        kees = fi_attrs.keys()
+        if fence_struct['isShareable'] == True:
+          instance_struct = {}
+          for kee in kees:
+            instance_struct[kee] = fi_attrs[kee]
+            try:
+                check = fence_struct['instances']
+                check.append(instance_struct)
+            except KeyError, e:
+                fence_struct['instances'] = list()
+                fence_struct['instances'].append(instance_struct) 
+        else:  #Not a shareable fence device type
+          for kee in kees:
+            fence_struct[kee] = fi_attrs[kee]
+      if i == 0:
+        level1.append(fence_struct)      
       else:
-        #for each kid, 
-        ### resolve name, find fence device
-        ### Add fd to list, if it is not there yet 
-        ### determine if it is a shared fence type
-        ### if it is a shared device, add instance entry
-        fds = model.getFenceDevices()
-        fence_struct = None
-        for kid in kids:
-          name = kid.getName()
-          found_fd = False
-          if not i in map:
-            continue
-          for entry in map[i]:
-            if entry['name'] == name:
-              fence_struct = entry
-              found_fd = True
-              break
-          if found_fd == False:
-            for fd in fds:
-              if fd.getName() == name:  #Found the fence device
-                fence_struct = {}
-                fence_struct['isShareable'] = fd.isShared()
-                fd_attrs = fd.getAttributes()
-                kees = fd_attrs.keys()
-                for kee in kees:
-                  fence_struct[kee] = fd_attrs[kee]
-          fi_attrs = kid.getAttributes()
-          kees = fi_attrs.keys()
-          if fence_struct['isShareable'] == True:
-            instance_struct = {}
-            for kee in kees:
-              instance_struct[kee] = fi_attrs[kee]
-              try:
-                  check = fence_struct['instances']
-                  check.append(instance_struct)
-              except KeyError, e:
-                  fence_struct['instances'] = list()
-                  fence_struct['instances'].append(instance_struct) 
-          else:  #Not a shareable fence device type
-            for kee in kees:
-              fence_struct[kee] = fi_attrs[kee]
-        if i == 0:
-          level1.append(fence_struct)      
-        else:
-          level2.append(fence_struct)      
+        level2.append(fence_struct)      
 
-    return map    
+  return map    
       
 def getFencesInfo(self, model, request):
   clustername = request['clustername']
@@ -3015,6 +2972,7 @@
   map['fencedevs'] = fencedevs
   #Get list of fence devices
   fds = model.getFenceDevices()
+  nodes_used = list() #This section determines which nodes use the dev
   for fd in fds:
     #create fencedev hashmap
     if fd.isShared() == True:
@@ -3028,14 +2986,15 @@
       except:
         fencedev['pretty_name'] = fd.getAgentType()
 
-      nodes_used = list() #This section determines which nodes use the dev
       nodes = model.getNodes()
       for node in nodes:
         flevels = node.getFenceLevels()
         for flevel in flevels: #These are the method blocks...
           kids = flevel.getChildren()
           for kid in kids: #These are actual devices in each level
-            if kid.getName().strip == fd.getName().strip():
+            luci_log.debug_verbose('getFencesInfo: comparing >%s< and >%s<' % (kid.getName().strip(),fd.getName().strip()))
+            if kid.getName().strip() == fd.getName().strip():
+              luci_log.debug_verbose('getFencesInfo: Found a match')
               #See if this fd already has an entry for this node
               found_duplicate = False
               for item in nodes_used:
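
With the split, each page type gets its own adapter: getFenceInfo() now always serves the per-node fence page and requires request['nodename'], while the new getFencesInfo() serves the fence device list page. A rough sketch of the two return shapes, with key names taken from the code above and empty placeholders for the values:

def fence_info_shape():
    # per-node view (getFenceInfo): the node's first two fence levels plus,
    # for each level, the sharable devices not yet used in that level
    return {'level1': [], 'level2': [], 'shared1': [], 'shared2': []}

def fences_info_shape():
    # cluster-wide view (getFencesInfo): one struct per shared fence device,
    # each carrying a 'nodesused' list for the fence device list page
    return {'fencedevs': []}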



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-07  2:36 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-07  2:36 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-11-07 02:36:06

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	indentation error

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.149&r2=1.150

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 01:32:33	1.149
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 02:36:04	1.150
@@ -3049,7 +3049,7 @@
               nodes_used.append(node_hash)
 
       fencedev['nodesused'] = nodes_used
-    fencedevs.append(fencedev)
+      fencedevs.append(fencedev)
     
   return map
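
The one-level re-indent matters because fencedev is only assigned inside the "if fd.isShared() == True:" branch. A toy reproduction, not conga code, of what the outdented append did:

devices = [('apc1', True), ('ilo1', False)]   # (name, isShared) stand-ins

shared_devs = []
for name, is_shared in devices:
    if is_shared:
        entry = {'name': name}
        shared_devs.append(entry)   # fixed: append only for shared devices

# With the append outdented one level, the non-shared 'ilo1' iteration
# would re-append the stale 'apc1' entry -- or raise NameError if the
# first device in the list were not shared.
print(shared_devs)                  # [{'name': 'apc1'}]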
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-07  1:32 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-07  1:32 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-11-07 01:32:34

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	separate fence info methods

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.148&r2=1.149

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/06 23:55:23	1.148
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/07 01:32:33	1.149
@@ -2867,7 +2867,7 @@
 
   return map
   
-def getFenceInfo(self, model, request):
+def getFenceInfo(self, model, request, fornode = None):
   clustername = request['clustername']
   baseurl = request['URL']
   map = {}
@@ -2882,7 +2882,7 @@
   map['shared2'] = shared2
   map['fencedevs'] = fencedevs
   nodename = ""
-  if request == None:  #this is being called by the fence device list page
+  if fornode == None:  #this is being called by the fence device list page
     #Get list of fence devices
     fds = model.getFenceDevices()
     for fd in fds:
@@ -3007,6 +3007,53 @@
 
     return map    
       
+def getFencesInfo(self, model, request):
+  clustername = request['clustername']
+  baseurl = request['URL']
+  map = {}
+  fencedevs = list() #This is for the fencedev list page
+  map['fencedevs'] = fencedevs
+  #Get list of fence devices
+  fds = model.getFenceDevices()
+  for fd in fds:
+    #create fencedev hashmap
+    if fd.isShared() == True:
+      fencedev = {}
+      attr_hash = fd.getAttributes()
+      kees = attr_hash.keys()
+      for kee in kees:
+        fencedev[kee] = attr_hash[kee] #copy attrs over
+      try:
+        fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
+      except:
+        fencedev['pretty_name'] = fd.getAgentType()
+
+      nodes_used = list() #This section determines which nodes use the dev
+      nodes = model.getNodes()
+      for node in nodes:
+        flevels = node.getFenceLevels()
+        for flevel in flevels: #These are the method blocks...
+          kids = flevel.getChildren()
+          for kid in kids: #These are actual devices in each level
+            if kid.getName().strip == fd.getName().strip():
+              #See if this fd already has an entry for this node
+              found_duplicate = False
+              for item in nodes_used:
+                if item['nodename'] == node.getName().strip():
+                  found_duplicate = True
+              if found_duplicate == True:
+                continue
+              node_hash = {}
+              node_hash['nodename'] = node.getName().strip()
+              node_hash['nodeurl'] = baseurl + "#fence" + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
+              nodes_used.append(node_hash)
+
+      fencedev['nodesused'] = nodes_used
+    fencedevs.append(fencedev)
+    
+  return map
+
+    
 def getLogsForNode(self, request):
 	try:
 		nodename = request['nodename']
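
The found_duplicate scan in getFencesInfo() keeps each node listed at most once per fence device. The same check, reduced to a sketch (add_node_once is hypothetical and the URL is a placeholder):

def add_node_once(nodes_used, nodename, nodeurl):
    for item in nodes_used:
        if item['nodename'] == nodename:
            return                  # this node is already recorded
    nodes_used.append({'nodename': nodename, 'nodeurl': nodeurl})

nodes_used = []
add_node_once(nodes_used, 'node1', '/luci/cluster?...')   # placeholder URL
add_node_once(nodes_used, 'node1', '/luci/cluster?...')   # duplicate, ignored
print(len(nodes_used))              # 1

A set of node names would express the same membership test in constant time, but the list-of-dicts shape is what the page template consumes.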



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-06 23:55 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-06 23:55 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-06 23:55:23

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py ricci_bridge.py 
	                           ricci_communicator.py 

Log message:
	cleanups and fixes for config parameter propagation

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.147&r2=1.148
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.22&r2=1.23
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.40&r2=1.41
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.17&r2=1.18

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/06 20:21:04	1.147
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/06 23:55:23	1.148
@@ -1,11 +1,10 @@
 import socket
 from ModelBuilder import ModelBuilder
 from xml.dom import minidom
-from ZPublisher import HTTPRequest
 import AccessControl
 from conga_constants import *
 from ricci_bridge import *
-from ricci_communicator import *
+from ricci_communicator import RicciCommunicator, RicciError, batch_status, extract_module_status
 from string import lower
 import time
 import Products.ManagedSystem
@@ -19,13 +18,12 @@
 from Vm import Vm
 from Script import Script
 from Samba import Samba
-from FenceHandler import FenceHandler
 from clusterOS import resolveOSType
-from FenceHandler import FENCE_OPTS
+from FenceHandler import FenceHandler, FENCE_OPTS
 from GeneralError import GeneralError
 from UnknownClusterError import UnknownClusterError
 from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
-from LuciSyslog import LuciSyslogError, LuciSyslog
+from LuciSyslog import LuciSyslog
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -37,7 +35,7 @@
 
 try:
 	luci_log = LuciSyslog()
-except LuciSyslogError, e:
+except:
 	pass
 
 def validateClusterNodes(request, sessionData, clusterName, numStorage):
@@ -243,8 +241,8 @@
   clusterfolder = self.restrictedTraverse(path)
   for key in batch_map.keys():
     key = str(key)
-    id = batch_map[key]
-    batch_id = str(id)
+    batch_id = batch_map[key]
+    batch_id = str(batch_id)
     objname = str(key + "____flag") #This suffix needed to avoid name collision
     clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #now designate this new object properly
@@ -253,9 +251,9 @@
     #flag[BATCH_ID] = batch_id
     #flag[TASKTYPE] = CLUSTER_ADD
     #flag[FLAG_DESC] = "Creating node " + key + " for cluster " + clusterName
-    flag.manage_addProperty(BATCH_ID,batch_id, "string")
-    flag.manage_addProperty(TASKTYPE,CLUSTER_ADD, "string")
-    flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
+    flag.manage_addProperty(BATCH_ID, batch_id, "string")
+    flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
+    flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
     flag.manage_addProperty(LAST_STATUS, 0, "int")
 
 def validateAddClusterNode(self, request):
@@ -420,16 +418,18 @@
 			form_hash[form_parent] = {'form': None, 'kids': []}
 		form_hash[form_parent]['kids'].append(form_id)
 		dummy_form = {}
+
 		for i in ielems:
 			try:
-				type = str(i.getAttribute('type'))
+				input_type = str(i.getAttribute('type'))
 			except:
 				continue
-			if not type or type == 'button':
+			if not input_type or input_type == 'button':
 				continue
 			try:
 				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
-			except:
+			except Exception, e:
+				luci_log.debug_verbose('Error parsing service XML: %s' % str(e))
 				pass
 
 		try:
@@ -654,7 +654,7 @@
 	try:
 		cp = model.getClusterPtr()
 		old_name = model.getClusterAlias()
-		old_ver = cp.getConfigVersion()
+		old_ver = int(cp.getConfigVersion())
 	except Exception, e:
 		luci_log.debug_verbose('getConfigVersion: %s' % str(e))
 		errors.append('unable to determine the current configuration version')
@@ -682,7 +682,7 @@
 		try:
 			if cluster_name != old_name:
 				cp.addAttribute('alias', cluster_name)
-			model.setConfigVersion(version_num)
+			cp.setConfigVersion(str(version_num))
 		except Exception, e:
 			luci_log.debug_verbose('unable to update general properties: %s' % str(e))
 			errors.append('Unable to update the cluster configuration.')
@@ -741,21 +741,46 @@
 def validateConfigCluster(self, request):
 	errors = list()
 	messages = list()
+	rc = None
 
 	try:
 		model = request.SESSION.get('model')
 		if not model:
 			raise Exception, 'model is none'
 	except Exception, e:
-		luci_log.debug_verbose('VCC0: unable to get model from session')
-		return (False, {'errors': ['No cluster model was found.']})
+		model = None
+		try:
+			cluname = request.form['clustername']
+		except:
+			try:
+				cluname = request['clustername']
+			except:
+				luci_log.debug_verbose('VCC0a: no model, no cluster name')
+				return (False, {'errors': ['No cluster model was found.']})
+
+		rc = getRicciAgent(self, cluname)
+		if not rc:
+			luci_log.debug_verbose('VCCb: no model in session, unable to find a ricci agent for the %s cluster' % cluname)
+			return (False, {'errors': ['No cluster model was found.']})
+
+		try:
+			model = getModelBuilder(rc, rc.dom0())
+			if not model:
+				raise Exception, 'model is none'
+		except Exception, e:
+			luci_log.debug_verbose('VCCc: unable to get model builder for cluster %s: %s' % (cluname, str(e)))
+			model = None
 
-	if not 'form' in request:
-		luci_log.debug_verbose('VCC1: no form passed in')
-		return (False, {'errors': ['No form was submitted.']})
+		if model is None:
+			luci_log.debug_verbose('VCC0: unable to get model from session')
+			return (False, {'errors': ['No cluster model was found.']})
 
-	if not 'configtype' in request.form:
-		luci_log.debug_verbose('VCC2: no configtype')
+	try:
+		if not 'configtype' in request.form:
+			luci_log.debug_verbose('VCC2: no configtype')
+			raise Exception, 'no config type'
+	except Exception, e:
+		luci_log.debug_verbose('VCC2a: %s' % str(e))
 		return (False, {'errors': ['No configuration type was submitted.']})
 
 	if not request.form['configtype'] in configFormValidators:
@@ -780,10 +805,10 @@
 
 	if retcode == True:
 		try:
-			old_ver = cp.getConfigVersion()
+			config_ver = int(cp.getConfigVersion()) + 1
 			# always increment the configuration version
-			model.setConfigVersion(old_ver + 1)
-			conf_str = str(model.exportModelAsString())
+			cp.setConfigVersion(config_ver)
+			conf_str = model.exportModelAsString()
 			if not conf_str:
 				raise Exception, 'conf_str is none'
 		except Exception, e:
@@ -799,13 +824,18 @@
 		luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
 		errors.append('unable to determine cluster name from model') 
 
-	rc = getRicciAgent(self, clustername)
+	if len(errors) > 0:
+		return (retcode, {'errors': errors, 'messages': messages})
+
 	if not rc:
-		luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
-		errors.append('unable to contact a ricci agent for cluster %s' \
-			% clustername)
-	else:
-		batch_id, result = setClusterConf(rc, conf_str)
+		rc = getRicciAgent(self, clustername)
+		if not rc:
+			luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
+			errors.append('unable to contact a ricci agent for cluster %s' \
+				% clustername)
+
+	if rc:
+		batch_id, result = setClusterConf(rc, str(conf_str))
 		if batch_id is None or result is None:
 			luci_log.debug_verbose('VCC7: setClusterConf: batchid or result is None')
 			errors.append('unable to propagate the new cluster configuration for %s' \
@@ -858,8 +888,10 @@
     except:
       request.SESSION.set('checkRet', {})
   else:
-    try: request.SESSION.set('checkRet', {})
-    except: pass
+    try:
+      request.SESSION.set('checkRet', {})
+    except:
+      pass
 
   #First, see if a cluster is chosen, then
   #check that the current user can access that system
@@ -921,7 +953,7 @@
     clcfg['show_children'] = False
 
   #loop through all clusters
-  syslist= list()
+  syslist = list()
   for system in systems:
     clsys = {}
     clsys['Title'] = system[0]
@@ -1398,6 +1430,7 @@
   portaltabs = list()
   if not userAuthenticated(self):
     return portaltabs
+
   selectedtab = "homebase"
   try:
     baseurl = req['URL']
@@ -1410,11 +1443,6 @@
   except KeyError, e:
     pass
 
-  try:
-    base2 = req['BASE2']
-  except KeyError, e:
-    base2 = req['HTTP_HOST'] + req['SERVER_PORT']
-
   htab = { 'Title':"homebase",
            'Description':"Home base for this luci server",
            'Taburl':"/luci/homebase"}
@@ -1448,7 +1476,7 @@
 
 
 
-def check_clusters(self,clusters):
+def check_clusters(self, clusters):
   clist = list()
   for cluster in clusters:
     if cluster_permission_check(cluster[1]):
@@ -2029,12 +2057,41 @@
   else:
     return
 
-
 def getClusterInfo(self, model, req):
-  cluname = req[CLUNAME]
+  try:
+    cluname = req[CLUNAME]
+  except:
+    try:
+      cluname = req.form['clustername']
+    except:
+      try:
+        cluname = req.form['clusterName']
+      except:
+        luci_log.debug_verbose('GCI0: unable to determine cluster name')
+        return {}
+
+  if model is None:
+    rc = getRicciAgent(self, cluname)
+    if not rc:
+      luci_log.debug_verbose('GCI1: unable to find a ricci agent for the %s cluster' % cluname)
+      return {}
+    try:
+      model = getModelBuilder(rc, rc.dom0())
+      if not model:
+        raise Exception, 'model is none'
+
+      try:
+        req.SESSION.set('model', model)
+      except Exception, e2:
+        luci_log.debug_verbose('GCI2 unable to set model in session: %s' % str(e2))
+    except Exception, e:
+      luci_log.debug_verbose('GCI3: unable to get model for cluster %s: %s' % (cluname, str(e)))
+      return {}
+
   baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
+  prop_baseurl = req['URL'] + '?' + PAGETYPE + '=' + CLUSTER_CONFIG + '&' + CLUNAME + '=' + cluname + '&'
   map = {}
-  basecluster_url = baseurl + ACTIONTYPE + "=" + BASECLUSTER
+  basecluster_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_GENERAL_TAB
   #needed:
   map['basecluster_url'] = basecluster_url
   #name field
@@ -2046,7 +2103,7 @@
   #new cluster params - if rhel5
   #-------------
   #Fence Daemon Props
-  fencedaemon_url = baseurl + ACTIONTYPE + "=" + FENCEDAEMON
+  fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB
   map['fencedaemon_url'] = fencedaemon_url
   fdp = model.getFenceDaemonPtr()
   pjd = fdp.getAttribute('post_join_delay')
@@ -2061,7 +2118,7 @@
   map['pfd'] = pfd
   #-------------
   #if multicast
-  multicast_url = baseurl + ACTIONTYPE + "=" + MULTICAST
+  multicast_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_MCAST_TAB
   map['multicast_url'] = multicast_url
   #mcast addr
   is_mcast = model.isMulticast()
@@ -2075,7 +2132,7 @@
 
   #-------------
   #quorum disk params
-  quorumd_url = baseurl + ACTIONTYPE + "=" + QUORUMD
+  quorumd_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_QDISK_TAB
   map['quorumd_url'] = quorumd_url
   is_quorumd = model.isQuorumd()
   map['is_quorumd'] = is_quorumd
@@ -2146,7 +2203,7 @@
 
   return map
 
-def getClustersInfo(self,status,req):
+def getClustersInfo(self, status, req):
   map = {}
   nodelist = list()
   svclist = list()
@@ -2596,6 +2653,7 @@
 
 def getNodeInfo(self, model, status, request):
   infohash = {}
+  item = None
   baseurl = request['URL']
   nodestate = NODE_ACTIVE
   svclist = list()
@@ -2692,7 +2750,7 @@
   return infohash
   #get list of faildoms for node
 
-def getNodesInfo(self, model, status,req):
+def getNodesInfo(self, model, status, req):
   resultlist = list()
   nodelist = list()
   svclist = list()
@@ -2802,7 +2860,7 @@
       map = fencedev.getAttributes()
       try:
         map['pretty_name'] = FENCE_OPTS[fencedev.getAgentType()]
-      except Exception, e:
+      except:
         map['pretty_name'] = fencedev.getAgentType()
 
       return map
@@ -2837,7 +2895,7 @@
           fencedev[kee] = attr_hash[kee] #copy attrs over
         try:
           fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
-        except Exception, e:
+        except:
           fencedev['pretty_name'] = fd.getAgentType()
 
         nodes_used = list() #This section determines which nodes use the dev
@@ -3052,15 +3110,12 @@
 def getXenVMInfo(self, model, request):
 	try:
 		xenvmname = request['servicename']
-	except KeyError, e:
+	except:
 		try:
 			xenvmname = request.form['servicename']
 		except:
 			luci_log.debug_verbose('servicename is missing from request')
 			return {}
-	except:
-		luci_log.debug_verbose('servicename is missing from request')
-		return {}
 
 	try:
 		xenvm = model.retrieveXenVMsByName(xenvmname)
@@ -3271,7 +3326,7 @@
           propslist = list()
           propslist.append(LAST_STATUS)
           item[1].manage_delProperties(propslist)
-          item[1].manage_addProperty(LAST_STATUS,creation_status, "int")
+          item[1].manage_addProperty(LAST_STATUS, creation_status, "int")
           continue
           
     else:
@@ -3340,15 +3395,12 @@
 
 	try:
 		cluname = request['clustername']
-	except KeyError, e:
+	except:
 		try:
 			cluname = request.form['clustername']
 		except:
 			luci_log.debug_verbose('getResourcesInfo missing cluster name')
 			return resList
-	except:
-		luci_log.debug_verbose('getResourcesInfo missing cluster name')
-		return resList
 
 	for item in modelb.getResources():
 		itemmap = {}
@@ -3368,18 +3420,16 @@
 	name = None
 	try:
 		name = request['resourcename']
-	except KeyError, e:
+	except:
 		try:
 			name = request.form['resourcename']
 		except:
 			pass
-	except:
-		pass
 
 	if name is None:
 		try:
-			type = request.form['type']
-			if type == 'ip':
+			res_type = request.form['type']
+			if res_type == 'ip':
 				name = request.form['value'].strip()
 		except:
 			pass
@@ -3390,15 +3440,12 @@
 
 	try:
 		cluname = request['clustername']
-	except KeyError, e:
+	except:
 		try:
 			cluname = request.form['clustername']
 		except:
 			luci_log.debug_verbose('getResourceInfo missing cluster name')
 			return {}
-	except:
-		luci_log.debug_verbose('getResourceInfo missing cluster name')
-		return {}
 
 	try:
 		baseurl = request['URL']
@@ -3483,7 +3530,6 @@
 		luci_log.debug_verbose('delRes: missing batch and/or result from setClusterConf')
 		return errstr
 
-	modelstr = ""
 	path = CLUSTER_FOLDER_PATH + str(clustername)
 	clusterfolder = self.restrictedTraverse(path)
 	batch_id = str(batch_number)
@@ -3530,7 +3576,7 @@
 			return None
 	else:
 		try:
-			res = apply(Ip)
+			res = Ip()
 			if not res:
 				raise Exception, 'apply(Ip) is None'
 		except Exception, e:
@@ -3586,7 +3632,7 @@
 			return None
 	else:
 		try:
-			res = apply(Fs)
+			res = Fs()
 			if not res:
 				raise Exception, 'apply(Fs) is None'
 		except Exception, e:
@@ -3694,7 +3740,7 @@
 			return None
 	else:
 		try:
-			res = apply(Clusterfs)
+			res = Clusterfs()
 			if not res:
 				raise Exception, 'apply(Clusterfs) is None'
 		except Exception, e:
@@ -3781,7 +3827,7 @@
 			return None
 	else:
 		try:
-			res = apply(Netfs)
+			res = Netfs()
 		except Exception, e:
 			luci_log.debug_verbose('addNfsm error: %s' % str(e))
 			return None
@@ -3876,7 +3922,7 @@
 			return None
 	else:
 		try:
-			res = apply(NFSClient)
+			res = NFSClient()
 		except:
 			luci_log.debug_verbose('addNfsc error: %s' % str(e))
 			return None
@@ -3940,7 +3986,7 @@
 			return None
 	else:
 		try:
-			res = apply(NFSExport)
+			res = NFSExport()
 		except:
 			luci_log.debug_verbose('addNfsx error: %s', str(e))
 			return None
@@ -3988,7 +4034,7 @@
 			return None
 	else:
 		try:
-			res = apply(Script)
+			res = Script()
 		except Exception, e:
 			luci_log.debug_verbose('addScr error: %s' % str(e))
 			return None
@@ -4009,10 +4055,10 @@
 		luci_log.debug_verbose('addScr error: %s' % err)
 
 	try:
-		file = form['file'].strip()
-		if not file:
+		path = form['file'].strip()
+		if not path:
 			raise KeyError, 'file path is blank'
-		res.attr_hash['file'] = file
+		res.attr_hash['file'] = path
 	except Exception, e:
 		err = str(e)
 		errors.append(err)
@@ -4046,7 +4092,7 @@
 			return None
 	else:
 		try:
-			res = apply(Samba)
+			res = Samba()
 		except Exception, e:
 			luci_log.debug_verbose('addSmb error: %s' % str(e))
 			return None
@@ -4159,7 +4205,7 @@
 	
 	return messages
 
-def addResource(self, request, modelb, res, type):
+def addResource(self, request, modelb, res, res_type):
 	clustername = modelb.getClusterName()
 	if not clustername:
 		raise Exception, 'cluster name from modelb.getClusterName() is blank'
@@ -4204,7 +4250,7 @@
 		flag.manage_addProperty(BATCH_ID, batch_id, "string")
 		flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
 
-		if type != 'ip':
+		if res_type != 'ip':
 			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['name'] + "\'", "string")
 		else:
 			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
@@ -4330,11 +4376,11 @@
 		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
 		objpath = str(path + '/' + objname)
 		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batchid, 'string')
+		flag.manage_addProperty(BATCH_ID, batch_id, 'string')
 		flag.manage_addProperty(TASKTYPE, task, 'string')
 		flag.manage_addProperty(FLAG_DESC, desc)
 	except Exception, e:
 		errmsg = 'Error creating flag (%s,%s,%s) at %s: %s' \
-					% (batchid, task, desc, objpath, str(e))
+					% (batch_id, task, desc, objpath, str(e))
 		luci_log.debug_verbose(errmsg)
 		raise Exception, errmsg
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/11/03 22:48:15	1.22
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/11/06 23:55:23	1.23
@@ -55,6 +55,13 @@
 MULTICAST="203"
 QUORUMD="204"
 
+PROPERTIES_TAB = 'tab'
+
+PROP_GENERAL_TAB = '1'
+PROP_FENCE_TAB = '2'
+PROP_MCAST_TAB = '3'
+PROP_QDISK_TAB = '4'
+
 PAGETYPE="pagetype"
 ACTIONTYPE="actiontype"
 TASKTYPE="tasktype"
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/01 20:34:02	1.40
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/06 23:55:23	1.41
@@ -114,13 +114,6 @@
 						install_LVS,
 						upgrade_rpms):
 
-	if os_str == 'rhel5':
-		cluster_version = '5'
-	elif os_str == 'rhel4':
-		cluster_version = '4'
-	else:
-		cluster_version = 'unknown'
-
 	batch = '<?xml version="1.0" ?>'
 	batch += '<batch>'
 	batch += '<module name="rpm">'
@@ -276,7 +269,7 @@
 	return doc
 
 def getClusterStatusBatch(rc):
-	batch_str ='<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
 	ricci_xml = rc.batch_run(batch_str, async=False)
 
 	if not ricci_xml or not ricci_xml.firstChild:
@@ -318,8 +311,8 @@
 		if not log_entries or len(log_entries) < 1:
 			raise Exception, 'no log data is available.'
 	except Exception, e:
-		'Error retrieving log data from %s: %s' \
-			% (rc.hostname(), str(e))
+		luci_log.debug_verbose('Error retrieving log data from %s: %s' \
+			% (rc.hostname(), str(e)))
 		return None
 	time_now = time()
 	entry = ''
@@ -463,7 +456,6 @@
 		return None
 
 	resultlist = list()
-	svc_node = None
 	for node in varnode.childNodes:
 		if node.nodeName == 'service':
 			svchash = {}
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/01 20:34:02	1.17
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/06 23:55:23	1.18
@@ -1,10 +1,8 @@
-from time import *
-from socket import *
+from socket import socket, ssl, AF_INET, SOCK_STREAM
 import xml
 import xml.dom
 from xml.dom import minidom
 from LuciSyslog import LuciSyslog
-from HelperFunctions import access_to_host_allowed
 
 CERTS_DIR_PATH = '/var/lib/luci/var/certs/'
 
@@ -210,8 +208,8 @@
                 % (batch_xml_str, self.__hostname))
             batch_xml = minidom.parseString(batch_xml_str).firstChild
         except Exception, e:
-            luci_log.debug('received invalid batch XML for %s: \"%s\"' \
-                % (self.__hostname, batch_xml_str))
+            luci_log.debug('received invalid batch XML for %s: \"%s\": %s' \
+                % (self.__hostname, batch_xml_str, str(e)))
             raise RicciError, 'batch XML is malformed'
 
         try:
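
Part of this commit moves the cluster properties pages from ACTIONTYPE-style URLs to the new PROPERTIES_TAB query parameter added to conga_constants.py. A sketch of the resulting URL scheme; prop_url, the base URL, and the pagetype value are stand-ins, only the five constants come from the diff:

PROPERTIES_TAB = 'tab'
PROP_GENERAL_TAB = '1'
PROP_FENCE_TAB = '2'
PROP_MCAST_TAB = '3'
PROP_QDISK_TAB = '4'

def prop_url(baseurl, pagetype, cluname, tab):
    # mirrors prop_baseurl + PROPERTIES_TAB + '=' + PROP_*_TAB in getClusterInfo
    return '%s?pagetype=%s&clustername=%s&%s=%s' % (
        baseurl, pagetype, cluname, PROPERTIES_TAB, tab)

# e.g. the general-properties tab (URL and pagetype values are placeholders):
print(prop_url('/luci/cluster/index_html', 'CLUSTER_CONFIG',
               'mycluster', PROP_GENERAL_TAB))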



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-05  0:59 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-05  0:59 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-05 00:59:10

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	propagate general cluster configuration changes

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.144&r2=1.145

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 22:48:15	1.144
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/05 00:59:09	1.145
@@ -634,6 +634,15 @@
 	errors = list()
 
 	try:
+		cp = model.getClusterPtr()
+		old_name = model.getClusterAlias()
+		old_ver = cp.getConfigVersion()
+	except Exception, e:
+		luci_log.debug_verbose('getConfigVersion: %s' % str(e))
+		errors.append('unable to determine the current configuration version')
+		return (False, {'errors': errors})
+
+	try:
 		cluster_name = form['cluname'].strip()
 		if not cluster_name:
 			raise KeyError('cluname')
@@ -642,17 +651,28 @@
 
 	try:
 		version_num = int(form['cfgver'])
-		if version_num < 0:
-			raise ValueError('configuration version numbers must be 0 or greater.')
+		if version_num < old_ver:
+			raise ValueError, 'configuration version number must be %d or greater.' \
+								% old_ver
+		if version_num == old_ver:
+			version_num += 1
 	except KeyError, e:
 		errors.append('No cluster configuration version was given.')
 	except ValueError, e:
-		errors.append('An invalid configuration version was given: ' + e)
+		errors.append('An invalid configuration version was given: %s' % str(e))
+
+	if len(errors) < 1:
+		try:
+			if cluster_name != old_name:
+				cp.addAttribute('alias', cluster_name)
+			model.setConfigVersion(version_num)
+		except Exception, e:
+			luci_log.debug_verbose('unable to update general properties: %s' % str(e))
+			errors.append('Unable to update the cluster configuration.')
 
 	if len(errors) > 0:
 		return (False, {'errors': errors})
-
-	return (True, {'messages': ['Changes accepted. - FILL ME IN']})
+	return (True, {})
 
 def validateFenceConfig(model, form):
 	errors = list()
@@ -721,6 +741,42 @@
 	if 'messages' in ret[1]:
 		messages.extend(ret[1]['messages'])
 
+	if retcode == True:
+		try:
+			conf_str = str(model.exportModelAsString())
+			if not conf_str:
+				raise Exception, 'conf_str is none'
+		except Exception, e:
+			luci_log.debug_verbose('VCC4: export model as string failed: %s' \
+				% str(e))
+			errors.append('unable to store the new cluster configuration')
+
+	try:
+		clustername = model.getClusterName()
+		if not clustername:
+			raise Exception, 'cluster name from modelb.getClusterName() is blank'
+	except Exception, e:
+		luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
+		errors.append('unable to determine cluster name from model') 
+
+	rc = getRicciAgent(self, clustername)
+	if not rc:
+		luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
+		errors.append('unable to contact a ricci agent for cluster %s' \
+			% clustername)
+	else:
+		batch_id, result = setClusterConf(rc, conf_str)
+		if batch_id is None or result is None:
+			luci_log.debug_verbose('VCC7: setClusterConf: batchid or result is None')
+			errors.append('unable to propagate the new cluster configuration for %s' \
+				% clustername)
+		else:
+			try:
+				set_node_flag(self, clustername, rc.hostname(), batch_id,
+					CLUSTER_CONFIG, 'Updating cluster configuration')
+			except:
+				pass
+
 	if len(errors) < 1:
 		messages.append('The cluster properties have been updated.')
 
@@ -4205,3 +4261,22 @@
 
 	modelb.setIsVirtualized(isVirtualized)
 	return modelb
+
+def set_node_flag(self, cluname, agent, batchid, task, desc):
+	path = str(CLUSTER_FOLDER_PATH + cluname)
+	batch_id = str(batchid)
+	objname = str(agent + '____flag')
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		objpath = str(path + '/' + objname)
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batchid, 'string')
+		flag.manage_addProperty(TASKTYPE, task, 'string')
+		flag.manage_addProperty(FLAG_DESC, desc)
+	except Exception, e:
+		errmsg = 'Error creating flag (%s,%s,%s)@%s: %s' \
+					% (batchid, task, desc, objpath, str(e))
+		luci_log.debug_verbose(errmsg)
+		raise Exception, errmsg
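
The new set_node_flag() helper persists a per-node flag so later page loads can poll the status of an in-flight ricci batch. What it records, sketched with a plain dict (the real code stores these as properties on a Zope ManagedSystem object named agent + '____flag'):

def set_node_flag_sketch(flags, cluname, agent, batchid, task, desc):
    objname = agent + '____flag'        # suffix avoids name collisions
    flags[(cluname, objname)] = {
        'BATCH_ID': str(batchid),       # ricci batch to poll
        'TASKTYPE': task,               # e.g. CLUSTER_CONFIG
        'FLAG_DESC': desc,              # text shown in the UI
    }

flags = {}
set_node_flag_sketch(flags, 'mycluster', 'node1.example.com', 42,
                     'CLUSTER_CONFIG', 'Updating cluster configuration')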



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-03 21:13 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-11-03 21:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-11-03 21:13:25

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Partial fix for broken fence list page

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.142&r2=1.143

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 19:13:57	1.142
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 21:13:25	1.143
@@ -19,6 +19,7 @@
 from Vm import Vm
 from Script import Script
 from Samba import Samba
+from FenceHandler import FenceHandler
 from clusterOS import resolveOSType
 from GeneralError import GeneralError
 from UnknownClusterError import UnknownClusterError
@@ -2671,38 +2672,83 @@
   for fencedev in fencedevs:
     if fencedev.getName().strip() == fencename:
       map = fencedev.getAttributes()
+      try:
+        map['pretty_name'] = FenceHandler.FENCE_OPTS[fencedev.getAgentType()]
+      except Exception, e:
+        map['pretty_name'] = fencedev.getAgentType()
+
       return map
 
   return map
   
 def getFenceInfo(self, model, request):
+  clustername = request['clustername']
+  baseurl = request['URL']
   map = {}
-  fencedevs = list() 
-  level1 = list()
-  level2 = list()
+  fencedevs = list() #This is for the fencedev list page
+  level1 = list() #First level fence devices
+  level2 = list() #Second level fence devices
+  shared1 = list() #List of available sharable fence devs not used in level1
+  shared2 = list() #List of available sharable fence devs not used in level2
   map['level1'] = level1
   map['level2'] = level2
+  map['shared1'] = shared1
+  map['shared2'] = shared2
   map['fencedevs'] = fencedevs
   nodename = ""
-  if request == None:  #this is being called by the fence device page
+  if request == None:  #this is being called by the fence device list page
     #Get list of fence devices
     fds = model.getFenceDevices()
     for fd in fds:
       #create fencedev hashmap
       if fd.isShared() == True:
-        fencedev = fd.getAttributes()
-        fencedevs.append(fencedev)
+        fencedev = {}
+        attr_hash = fd.getAttributes()
+        kees = attr_hash.keys()
+        for kee in kees:
+          fencedev[kee] = attr_hash[kee] #copy attrs over
+        try:
+          fencedev['pretty_name'] = FenceHandler.FENCE_OPTS[fd.getAgentType()]
+        except Exception, e:
+          fencedev['pretty_name'] = fd.getAgentType()
+
+        nodes_used = list() #This section determines which nodes use the dev
+        nodes = model.getNodes()
+        for node in nodes:
+          flevels = node.getFenceLevels()
+          for flevel in flevels: #These are the method blocks...
+            kids = flevel.getChildren()
+            for kid in kids: #These are actual devices in each level
+              if kid.getName().strip == fd.getName().strip():
+                #See if this fd already has an entry for this node
+                found_duplicate = False
+                for item in nodes_used:
+                  if item['nodename'] == node.getName().strip():
+                    found_duplicate = True
+                if found_duplicate == True:
+                  continue
+                node_hash = {}
+                node_hash['nodename'] = node.getName().strip()
+                node_hash['nodeurl'] = baseurl + "#fence" + "?clustername=" + clustername + "&nodename=" + node.getName() + "&pagetype=" + NODE 
+                nodes_used.append(node_hash)
+
+        fencedev['nodesused'] = nodes_used
+      fencedevs.append(fencedev)
       
     return map
 
   else:
+    pass
     try:
       nodename = request['nodename']
     except KeyError, e:
       raise GeneralError('FATAL', "Could not extract nodename from request")
     
-    #here we need to get fences for a node - just the first two levels
-    #then fill in two data structures with all attr's 
+    #Here we need to get fences for a node - just the first two levels
+    #Each level has its own list of fence devs used in that level
+    #For each fence dev, a list of instance structs is appended
+    #In addition, for each level, a list of available but unused fence devs
+    #is returned. 
     try:
       node = model.retrieveNodeByName(nodename)
     except GeneralError, e:
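
The pretty-name lookup added here degrades gracefully for agents missing from the FENCE_OPTS table. The same fallback in isolation; the single table entry below is illustrative, the real table lives in FenceHandler:

FENCE_OPTS = {'fence_apc': 'APC Power Switch'}   # illustrative entry only

def pretty_name(agent_type):
    try:
        return FENCE_OPTS[agent_type]
    except KeyError:                 # unknown agent: fall back to raw type
        return agent_type

print(pretty_name('fence_apc'))      # APC Power Switch
print(pretty_name('fence_unknown'))  # fence_unknown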



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-03  1:24 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-03  1:24 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-03 01:24:56

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix the log url on the cluster node list page

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.140&r2=1.141

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 01:08:01	1.140
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 01:24:56	1.141
@@ -2551,15 +2551,16 @@
 
   #return infohash
   infohash['d_states'] = None
+
+  try:
+    nodename_resolved = resolve_nodename(self, clustername, nodename)
+  except:
+    luci_log.debug_verbose('Unable to resolve node name %s/%s to retrieve daemon information' % (nodename, clustername))
+    nodename_resolved = nodename
+
   if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
   #call service module on node and find out which daemons are running
     try:
-      nodename_resolved = resolve_nodename(self, clustername, nodename)
-    except:
-      luci_log.debug_verbose('Unable to resolve node name %s/%s to retrieve daemon information' % (nodename, clustername))
-      nodename_resolved = nodename
-
-    try:
       rc = RicciCommunicator(nodename_resolved)
       if not rc:
         raise Exception, 'rc is none'
@@ -2576,14 +2577,12 @@
       dlist.append("rgmanager")
       states = getDaemonStates(rc, dlist)
       infohash['d_states'] = states
-  else:
-    nodename_resolved = nodename
 
   infohash['logurl'] = '/luci/logs/?nodename=' + nodename_resolved + '&clustername=' + clustername
   return infohash
   #get list of faildoms for node
 
-def getNodesInfo(self, model,status,req):
+def getNodesInfo(self, model, status,req):
   resultlist = list()
   nodelist = list()
   svclist = list()
@@ -2615,7 +2614,15 @@
       map['status'] = NODE_INACTIVE
       map['status_str'] = NODE_INACTIVE_STR
 
-    map['logurl'] = '/luci/logs?nodename=' + name + '&clustername=' + clustername
+    try:
+      nodename_resolved = resolve_nodename(self, clustername, name)
+    except:
+      luci_log.debug_verbose('Unable to resolve node name %s/%s' \
+          % (nodename, clustername))
+      nodename_resolved = name 
+
+    map['logurl'] = '/luci/logs?nodename=' + nodename_resolved + '&clustername=' + clustername
+
     #set up URLs for dropdown menu...
     if map['status'] == NODE_ACTIVE:
       map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
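
The fix hoists name resolution ahead of the node-state check, so the log URL is always built from the resolved name rather than from a variable that is only bound on some paths. The control flow, sketched (resolve stands in for resolve_nodename, which may raise):

def log_url(nodename, clustername, resolve):
    try:
        nodename_resolved = resolve(clustername, nodename)
    except Exception:
        nodename_resolved = nodename        # fall back to the raw name
    return ('/luci/logs?nodename=' + nodename_resolved +
            '&clustername=' + clustername)

print(log_url('node1', 'mycluster', lambda c, n: n + '.example.com'))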



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-03  1:08 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-03  1:08 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-03 01:08:01

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	log a debug message when ricci connections fail for unknown reasons and fix a corner case when generating log urls

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.139&r2=1.140

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 20:58:48	1.139
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/03 01:08:01	1.140
@@ -2296,13 +2296,11 @@
 			try:
 				rc = RicciCommunicator(node[1].getId())
 				if not rc:
-					continue
-			except RicciError, e:
+					raise Exception, 'rc is None'
+			except Exception, e:
 				luci_log.debug('ricci error for host %s: %s' \
 					% (node[0], str(e)))
 				continue
-			except:
-				continue
 
 			if not rc.authed():
 				rc = None
@@ -2560,6 +2558,7 @@
     except:
       luci_log.debug_verbose('Unable to resolve node name %s/%s to retrieve daemon information' % (nodename, clustername))
       nodename_resolved = nodename
+
     try:
       rc = RicciCommunicator(nodename_resolved)
       if not rc:
@@ -2577,6 +2576,8 @@
       dlist.append("rgmanager")
       states = getDaemonStates(rc, dlist)
       infohash['d_states'] = states
+  else:
+    nodename_resolved = nodename
 
   infohash['logurl'] = '/luci/logs/?nodename=' + nodename_resolved + '&clustername=' + clustername
   return infohash
@@ -4089,6 +4090,7 @@
 
 		#a flag already exists... try to delete it
 		try:
+			# hostname must be a FQDN
 			rc = RicciCommunicator(hostname)
 		except RicciError, e:
 			luci_log.info('Unable to connect to the ricci daemon: %s' % str(e))
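
The first hunk above collapses two silent failure paths into one: a falsy communicator is promoted to an exception, and a single broad handler logs the host and the reason before skipping that node. A minimal Python 2 sketch of the raise-then-log-then-continue idiom; connect() is a hypothetical stand-in for RicciCommunicator():

    import random

    # Hypothetical stand-in for RicciCommunicator(): may return a
    # falsy value or raise, depending on how the connection fails.
    def connect(host):
        r = random.random()
        if r < 0.3:
            return None
        if r < 0.6:
            raise IOError('connection refused')
        return object()

    for host in ['node1', 'node2', 'node3']:
        try:
            rc = connect(host)
            if not rc:
                # Promote the falsy return to an exception so both
                # failure modes reach the same logging handler.
                raise Exception, 'rc is None'
        except Exception, e:
            print 'ricci error for host %s: %s' % (host, str(e))
            continue
        print 'connected to %s' % host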



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-02 20:58 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-02 20:58 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-02 20:58:48

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	use the host data from rc.hostname(), not rc.system_info() when deploying a cluster (it shouldn't matter, since we force the user to enter something sensible to begin with).

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.138&r2=1.139

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 20:45:18	1.138
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 20:58:48	1.139
@@ -188,7 +188,7 @@
 		batchNode = createClusterBatch(cluster_os,
 						clusterName,
 						clusterName,
-						map(lambda x: x['ricci_host'], nodeList),
+						map(lambda x: x['host'], nodeList),
 						True,
 						True,
 						enable_storage,
@@ -213,10 +213,10 @@
 		for i in nodeList:
 			success = True
 			try:
-				rc = RicciCommunicator(i['ricci_host'])
+				rc = RicciCommunicator(i['host'])
 			except RicciError, e:
 				luci_log.debug('Unable to connect to the ricci agent on %s: %s'\
-					% (i['ricci_host'], str(e)))
+					% (i['host'], str(e)))
 				success = False
 			except:
 				success = False
@@ -224,14 +224,14 @@
 			if success == True:
 				try:
 					resultNode = rc.process_batch(batchNode, async=True)
-					batch_id_map[i['ricci_host']] = resultNode.getAttribute('batch_id')
+					batch_id_map[i['host']] = resultNode.getAttribute('batch_id')
 				except:
 					success = False
 
 			if not success:
 				nodeUnauth(nodeList)
 				cluster_properties['isComplete'] = False
-				errors.append('An error occurred while attempting to add cluster node \"' + i['ricci_host'] + '\"')
+				errors.append('An error occurred while attempting to add cluster node \"' + i['host'] + '\"')
 				return (False, {'errors': errors, 'requestResults':cluster_properties })
 		buildClusterCreateFlags(self, batch_id_map, clusterName)
 
@@ -346,7 +346,7 @@
 			clunode['errors'] = True
 			nodeUnauth(nodeList)
 			cluster_properties['isComplete'] = False
-			errors.append('Unable to initiate node creation for host \"' + clunode['ricci_host'] + '\"')
+			errors.append('Unable to initiate node creation for host \"' + clunode['host'] + '\"')
 
 	if not cluster_properties['isComplete']:
 		return (False, {'errors': errors, 'requestResults': cluster_properties})
@@ -363,25 +363,25 @@
 		clunode = nodeList[i]
 		success = True
 		try:
-			rc = RicciCommunicator(clunode['ricci_host'])
+			rc = RicciCommunicator(clunode['host'])
 		except:
-			luci_log.info('Unable to connect to the ricci daemon on host ' + clunode['ricci_host'])
+			luci_log.info('Unable to connect to the ricci daemon on host ' + clunode['host'])
 			success = False
 
 		if success:
 			try:
 				resultNode = rc.process_batch(batchNode, async=True)
-				batch_id_map[clunode['ricci_host']] = resultNode.getAttribute('batch_id')
+				batch_id_map[clunode['host']] = resultNode.getAttribute('batch_id')
 			except:
 				success = False
 
 		if not success:
 			nodeUnauth(nodeList)
 			cluster_properties['isComplete'] = False
-			errors.append('An error occurred while attempting to add cluster node \"' + clunode['ricci_host'] + '\"')
+			errors.append('An error occurred while attempting to add cluster node \"' + clunode['host'] + '\"')
 			return (False, {'errors': errors, 'requestResults': cluster_properties})
 
-			messages.append('Cluster join initiated for host \"' + clunode['ricci_host'] + '\"')
+			messages.append('Cluster join initiated for host \"' + clunode['host'] + '\"')
 
 	buildClusterCreateFlags(self, batch_id_map, clusterName)
 	return (True, {'errors': errors, 'messages': messages})
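
The rename above makes 'host' the single key identifying a node throughout the deployment path, from createClusterBatch() through process_batch(). (As committed, the final messages.append() still sits after the return inside the `if not success` block, so it is unreachable.) A minimal Python 2 sketch of the submit loop; submit_batch() is a hypothetical stand-in for rc.process_batch(batchNode, async=True):

    # Hypothetical stand-in for process_batch(); raises on failure.
    def submit_batch(host):
        if host.endswith('.bad'):
            raise IOError('unreachable')
        return {'batch_id': 'batch-%s' % host}

    node_list = [{'host': 'n1.example.com'}, {'host': 'n2.example.com'}]
    batch_id_map = {}
    errors = []
    for i in node_list:
        try:
            result = submit_batch(i['host'])
            batch_id_map[i['host']] = result['batch_id']
        except Exception, e:
            # Abort the whole deployment on the first failed node.
            errors.append('An error occurred while attempting to '
                          'add cluster node "%s"' % i['host'])
            break

    print batch_id_map, errors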



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-02 20:45 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-02 20:45 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-02 20:45:18

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix log link generation for hosts without a FQDN in the cluster.conf file

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.137&r2=1.138

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 20:41:29	1.137
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 20:45:18	1.138
@@ -2578,7 +2578,7 @@
       states = getDaemonStates(rc, dlist)
       infohash['d_states'] = states
 
-  infohash['logurl'] = '/luci/logs/?nodename=' + nodename + '&clustername=' + clustername
+  infohash['logurl'] = '/luci/logs/?nodename=' + nodename_resolved + '&clustername=' + clustername
   return infohash
   #get list of faildoms for node
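
The one-line fix substitutes the resolved name into the log URL. The archived code builds the query string by plain concatenation; a sketch of the same URL built with urllib.urlencode(), which escapes anything unsafe in the names (parameter order in the output may vary):

    from urllib import urlencode

    def log_url(nodename_resolved, clustername):
        # urlencode() percent-escapes the values; the archived code
        # concatenates the raw strings instead.
        query = urlencode({'nodename': nodename_resolved,
                           'clustername': clustername})
        return '/luci/logs/?' + query

    print log_url('node-1.example.com', 'my cluster')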
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-02 20:41 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-02 20:41 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-02 20:41:29

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix for viewing cluster nodes without an FQDN

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.136&r2=1.137

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 03:17:07	1.136
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 20:41:29	1.137
@@ -264,7 +264,7 @@
 	requestResults = {}
 
 	try:
-	 	sessionData = request.SESSION.get('checkRet')
+		sessionData = request.SESSION.get('checkRet')
 	except:
 		sessionData = None
 
@@ -2555,14 +2555,28 @@
   infohash['d_states'] = None
   if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
   #call service module on node and find out which daemons are running
-    rc = RicciCommunicator(nodename)
-    dlist = list()
-    dlist.append("ccsd")
-    dlist.append("cman")
-    dlist.append("fenced")
-    dlist.append("rgmanager")
-    states = getDaemonStates(rc, dlist)
-    infohash['d_states'] = states
+    try:
+      nodename_resolved = resolve_nodename(self, clustername, nodename)
+    except:
+      luci_log.debug_verbose('Unable to resolve node name %s/%s to retrieve daemon information' % (nodename, clustername))
+      nodename_resolved = nodename
+    try:
+      rc = RicciCommunicator(nodename_resolved)
+      if not rc:
+        raise Exception, 'rc is none'
+    except Exception, e:
+      rc = None
+      luci_log.info('Error connecting to %s: %s' \
+          % (nodename_resolved, str(e)))
+
+    if rc is not None:
+      dlist = list()
+      dlist.append("ccsd")
+      dlist.append("cman")
+      dlist.append("fenced")
+      dlist.append("rgmanager")
+      states = getDaemonStates(rc, dlist)
+      infohash['d_states'] = states
 
   infohash['logurl'] = '/luci/logs/?nodename=' + nodename + '&clustername=' + clustername
   return infohash
@@ -2716,7 +2730,7 @@
           name = kid.getName()
           found_fd = False
           if not i in map:
-			continue
+            continue
           for entry in map[i]:
             if entry['name'] == name:
               fence_struct = entry
@@ -2838,7 +2852,7 @@
   try:
     stringbuf = model.exportModelAsString()
     if not stringbuf:
-   	  raise Exception, 'model is blank'
+      raise Exception, 'model is blank'
   except Exception, e:
     luci_log.debug_verbose('exportModelAsString error: %s' % str(e))
     return None
@@ -2871,7 +2885,7 @@
 		luci_log.debug_verbose('servicename is missing from request')
 		return {}
 
-	try:  
+	try:
 		xenvm = model.retrieveXenVMsByName(xenvmname)
 	except:
 		luci_log.debug('An error occurred while attempting to get VM %s' \
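
The rewritten daemon-state block above only queries the agent once a communicator has actually been obtained, leaving infohash['d_states'] as None otherwise. A minimal Python 2 sketch of that guard; connect() and get_daemon_states() are hypothetical stand-ins for RicciCommunicator() and getDaemonStates():

    # Stand-in that always fails, to exercise the fallback path.
    def connect(host):
        raise IOError('no route to host')

    def get_daemon_states(rc, daemons):
        return [(d, 'running') for d in daemons]

    infohash = {'d_states': None}
    try:
        rc = connect('node1.example.com')
        if not rc:
            raise Exception, 'rc is none'
    except Exception, e:
        rc = None
        print 'Error connecting: %s' % str(e)

    if rc is not None:
        # Only reached when the connection above succeeded.
        infohash['d_states'] = get_daemon_states(
            rc, ['ccsd', 'cman', 'fenced', 'rgmanager'])

    print infohash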



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-11-02  3:17 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-11-02  3:17 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-02 03:17:07

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 

Log message:
	homebase_adapters cleanup, logging, and exception robustness
	fix a bug that causes IP resource creation to fail in cluster_adapters

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.135&r2=1.136
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.38&r2=1.39

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/31 17:18:12	1.135
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/02 03:17:07	1.136
@@ -3090,7 +3090,11 @@
       rc = RicciCommunicator(ricci[0])
       finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
       if finished == True:
-        node_report['desc'] = item[1].getProperty(FLAG_DESC) + REDIRECT_MSG
+        flag_desc = item[1].getProperty(FLAG_DESC)
+        if flag_desc is None:
+          node_report['desc'] = REDIRECT_MSG
+        else:
+          node_report['desc'] = flag_desc + REDIRECT_MSG
         nodereports.append(node_report)
         try:
             clusterfolder.manage_delObjects(item[0])
@@ -4009,7 +4013,7 @@
 		flag.manage_addProperty(BATCH_ID, batch_id, "string")
 		flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
 
-		if type != 'ip':
+		if res.attr_hash['type'] != 'ip':
 			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['name'] + "\'", "string")
 		else:
 			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
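
The one-line FLAG_DESC fix above is the IP-resource bug named in the log message: the bare name `type` is the Python builtin, so `type != 'ip'` is always true, and IP resources, which are keyed by 'address' rather than 'name' here, hit the wrong branch and fail. A small Python 2 demonstration, using a plain dict in place of res.attr_hash:

    res_attrs = {'type': 'ip', 'address': '10.0.0.1'}

    print type != 'ip'               # True -- compares the builtin
    print res_attrs['type'] != 'ip'  # False -- compares the field

    # With the fixed comparison, IP resources take the branch that
    # reads 'address' instead of the missing 'name'.
    if res_attrs['type'] != 'ip':
        desc = "Creating New Resource '%s'" % res_attrs['name']
    else:
        desc = "Creating New Resource '%s'" % res_attrs['address']
    print desc
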
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/11/01 20:34:02	1.38
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/11/02 03:17:07	1.39
@@ -1,23 +1,20 @@
-import string
 import re
-import sys
 import os
 from AccessControl import getSecurityManager
-from ZPublisher import HTTPRequest
-import xml.dom
 import cgi
 
-from ricci_defines import *
+from conga_constants import PLONE_ROOT, CLUSTER_NODE_NEED_AUTH, \
+							HOMEBASE_ADD_CLUSTER, HOMEBASE_ADD_CLUSTER_INITIAL, \
+							HOMEBASE_ADD_SYSTEM, HOMEBASE_ADD_USER, \
+							HOMEBASE_DEL_SYSTEM, HOMEBASE_DEL_USER, HOMEBASE_PERMS
 from ricci_bridge import getClusterConf
-from ricci_communicator import RicciCommunicator
-from ricci_communicator import CERTS_DIR_PATH
+from ricci_communicator import RicciCommunicator, CERTS_DIR_PATH
 from clusterOS import resolveOSType
-from conga_constants import *
-from LuciSyslog import LuciSyslog, LuciSyslogError
+from LuciSyslog import LuciSyslog
 
 try:
 	luci_log = LuciSyslog()
-except LuciSyslogError, e:
+except:
 	pass
 
 def siteIsSetup(self):
@@ -27,8 +24,8 @@
 	except: pass
 	return False
 
-def strFilter(regex, replaceChar, str):
-	return re.sub(regex, replaceChar, str)
+def strFilter(regex, replaceChar, arg):
+	return re.sub(regex, replaceChar, arg)
 
 def validateDelSystem(self, request):
 	errors = list()
@@ -74,6 +71,8 @@
 
 	try:
 		user = self.portal_membership.getMemberById(userId)
+		if not user:
+			raise Exception, 'user %s does not exist' % userId
 	except:
 		return (False, {'errors': [ 'No such user: \"' + userId + '\"' ] })
 
@@ -531,7 +530,7 @@
 						i[1].manage_setLocalRoles(userId, roles)
 						messages.append('Added permission for ' + userId + ' for cluster ' + i[0])
 				except:
-						errors.append('Failed to add permission for ' + userId + ' for cluster ' + i[0])
+					errors.append('Failed to add permission for ' + userId + ' for cluster ' + i[0])
 			else:
 				try:
 					if user.has_role('View', i[1]):
@@ -545,7 +544,7 @@
 
 						messages.append('Removed permission for ' + userId + ' for cluster ' + i[0])
 				except:
-						errors.append('Failed to remove permission for ' + userId + ' for cluster ' + i[0])
+					errors.append('Failed to remove permission for ' + userId + ' for cluster ' + i[0])
 
 	storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
 	if not '__SYSTEM' in request.form:
@@ -572,7 +571,7 @@
 						i[1].manage_setLocalRoles(userId, roles)
 						messages.append('Added permission for ' + userId + ' for system ' + i[0])
 				except:
-						errors.append('Failed to add permission for ' + userId + ' for system ' + i[0])
+					errors.append('Failed to add permission for ' + userId + ' for system ' + i[0])
 			else:
 				try:
 					if user.has_role('View', i[1]):
@@ -586,7 +585,7 @@
 
 						messages.append('Removed permission for ' + userId + ' for system ' + i[0])
 				except:
-						errors.append('Failed to remove permission for ' + userId + ' for system ' + i[0])
+					errors.append('Failed to remove permission for ' + userId + ' for system ' + i[0])
 
 	if len(errors) > 0:
 		returnCode = False
@@ -665,23 +664,25 @@
 ]
 
 def userAuthenticated(self):
-	if (isAdmin(self) or getSecurityManager().getUser().has_role('Authenticated', self.restrictedTraverse(PLONE_ROOT))):
-		return True
-
+	try:
+		if (isAdmin(self) or getSecurityManager().getUser().has_role('Authenticated', self.restrictedTraverse(PLONE_ROOT))):
+			return True
+	except Exception, e:
+		luci_log.debug_verbose('UA0: %s' % str(e)) 
 	return False
 
 def isAdmin(self):
 	try:
 		return getSecurityManager().getUser().has_role('Owner', self.restrictedTraverse(PLONE_ROOT))
-	except:
-		pass
+	except Exception, e:
+		luci_log.debug_verbose('IA0: %s' % str(e)) 
 	return False
 
 def userIsAdmin(self, userId):
 	try:
 		return self.portal_membership.getMemberById(userId).has_role('Owner', self.restrictedTraverse(PLONE_ROOT))
-	except:
-		pass
+	except Exception, e:
+		luci_log.debug_verbose('UIA0: %s: %s' % (userId, str(e)))
 	return False
 
 def homebaseControlPost(self, request):
@@ -698,15 +699,19 @@
 	if 'pagetype' in request.form:
 		pagetype = int(request.form['pagetype'])
 	else:
-		try: request.SESSION.set('checkRet', {})
-		except: pass
+		try:
+			request.SESSION.set('checkRet', {})
+		except:
+			pass
 		return homebasePortal(self, request, '.', '0')
 
 	try:
 		validatorFn = formValidators[pagetype - 1]
 	except:
-		try: request.SESSION.set('checkRet', {})
-		except: pass
+		try:
+			request.SESSION.set('checkRet', {})
+		except:
+			pass
 		return homebasePortal(self, request, '.', '0')
 
 	if validatorFn == validateAddClusterInitial or validatorFn == validateAddCluster:
@@ -913,71 +918,111 @@
 
 def getClusterSystems(self, clusterName):
 	if isAdmin(self):
-		return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
+		try:
+			return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
+		except Exception, e:
+			luci_log.debug_verbose('GCS0: %s: %s' % (clusterName, str(e)))
+			return None
 
 	try:
 		i = getSecurityManager().getUser()
 		if not i:
-			raise
-	except:
+			raise Exception, 'GCSMGU failed'
+	except Exception, e:
+		luci_log.debug_verbose('GCS1: %s: %s' % (clusterName, str(e)))
 		return None
 
-	csystems = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
-	if not csystems:
+	try:
+		csystems = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
+		if not csystems or len(csystems) < 1:
+			return None
+	except Exception, e:
+		luci_log.debug_verbose('GCS2: %s: %s' % (clusterName, str(e)))
 		return None
 
 	allowedCSystems = list()
 	for c in csystems:
-		if i.has_role('View', c[1]):
-			allowedCSystems.append(c)
-	return (c)
+		try:
+			if i.has_role('View', c[1]):
+				allowedCSystems.append(c)
+		except Exception, e:
+			luci_log.debug_verbose('GCS3: %s: %s: %s' \
+				% (clusterName, c[0], str(e)))
+
+	return allowedCSystems
 
 def getClusters(self):
 	if isAdmin(self):
-		return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
+		try:
+			return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
+		except Exception, e:
+			luci_log.debug_verbose('GC0: %s' % str(e))
+			return None
 	try:
 		i = getSecurityManager().getUser()
 		if not i:
-			raise
-	except:
+			raise Exception, 'GSMGU failed'
+	except Exception, e:
+		luci_log.debug_verbose('GC1: %s' % str(e))
 		return None
 
-	clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
-	if not clusters:
+	try:
+		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
+		if not clusters or len(clusters) < 1:
+			return None
+	except Exception, e:
+		luci_log.debug_verbose('GC2: %s' % str(e))
 		return None
 
 	allowedClusters = list()
 	for c in clusters:
-		if i.has_role('View', c[1]):
-			allowedClusters.append(c)
+		try:
+			if i.has_role('View', c[1]):
+				allowedClusters.append(c)
+		except Exception, e:
+			luci_log.debug_verbose('GC3: %s: %s' % (c[0], str(e)))
 
 	return allowedClusters
 
 def getStorage(self):
 	if isAdmin(self):
-		return self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
+		try:
+			return self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
+		except Exception, e:
+			luci_log.debug_verbose('GS0: %s' % str(e))
+			return None
+
 	try:
 		i = getSecurityManager().getUser()
 		if not i:
-			return None
-	except:
+			raise Exception, 'GSMGU failed'
+	except Exception, e:
+		luci_log.debug_verbose('GS1: %s' % str(e))
 		return None
 
-	storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
-	if not storage:
+	try:
+		storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
+		if not storage or len(storage) < 1:
+			return None
+	except Exception, e:
+		luci_log.debug_verbose('GS2: %s' % str(e))
 		return None
 
 	allowedStorage = list()
 	for s in storage:
-		if i.has_role('View', s[1]):
-			allowedStorage.append(s)
+		try:
+			if i.has_role('View', s[1]):
+				allowedStorage.append(s)
+		except Exception, e:
+			luci_log.debug_verbose('GS3: %s' % str(e))
 
 	return allowedStorage
 
 def createSystem(self, host, passwd):
 	try:
 		exists = self.restrictedTraverse(PLONE_ROOT +'/systems/storage/' + host)
-		return 'Storage system \"' + host + '\" is already managed.'
+		luci_log.debug_verbose('CS0: %s already exists' % host)
+		return 'Storage system %s is already managed' % host
 	except:
 		pass
 
@@ -986,49 +1031,52 @@
 		if rc is None:
 			raise Exception, 'unknown error'
 	except Exception, e:
+		luci_log.debug_verbose('CS1: %s: %s' % (host, str(e)))
 		return 'Unable to establish a connection to the ricci agent on %s: %s' \
 			% (host, str(e))
 
 	try:
 		if not rc.authed():
 			rc.auth(passwd)
-	except:
-		return 'Unable to communicate with the ricci agent on \"' + host + '\" for authentication'
+	except Exception, e:
+		luci_log.debug_verbose('CS2: %s: %s' % (host, str(e)))
+		return 'Unable to communicate with the ricci agent on %s for authentication' % host
 
 	try:
 		i = rc.authed()
-	except:
-		return 'Unable to authenticate to the ricci agent on \"' + host + '\"'
+	except Exception, e:
+		luci_log.debug_verbose('CS3 %s: %s' % (host, str(e)))
+		return 'Unable to authenticate to the ricci agent on %s' % host
 
 	if i != True:
-		return 'Authentication for storage system \"' + host + '\" failed'
-
-#	rhost = rc.system_name()
-#	if rhost and rhost != host and rhost[:9] != 'localhost' and rhost[:5] != '127.0':
-#		host = str(rhost)
+		return 'Authentication for storage system %s failed' % host
 
 	try:
-		exists = self.restrictedTraverse(PLONE_ROOT +'/systems/storage/' + host)
-		return 'Storage system \"' + host + '\" is already managed.'
+		exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+		luci_log.debug_verbose('CS4 %s already exists' % host)
+		return 'Storage system %s is already managed' % host
 	except:
 		pass
 
 	try:
 		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
 	except Exception, e:
-		return 'Unable to create storage system %s: %s' % (host, str(e))
+		luci_log.debug_verbose('CS5 %s: %s' % (host, str(e)))
+		return 'Unable to create storage system %s: %s' % host
 
 	try:
 		ssystem.manage_addFolder(host, '__luci__:system')
 		newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
 	except Exception, e:
-		return 'Unable to create storage system %s: %s' % (host, str(e))
+		luci_log.debug_verbose('CS6 %s: %s' % (host, str(e)))
+		return 'Unable to create DB entry for storage system %s' % host
 
 	try:
 		newSystem.manage_acquiredPermissions([])
-		newSystem.manage_role('View', ['Access contents information','View'])
+		newSystem.manage_role('View', ['Access contents information', 'View'])
 	except Exception, e:
-		return 'Unable to set permissions on storage system %s: %s' % (host, str(e))
+		luci_log.debug_verbose('CS7 %s: %s' % (host, str(e)))
+		return 'Unable to set permissions on storage system %s' % host
 
 	return None
 
@@ -1036,26 +1084,27 @@
 	try:
 		sessionData = request.SESSION.get('checkRet')
 		nodeUnauth(sessionData['requestResults']['nodeList'])
-	except:
-		pass
+	except Exception, e:
+		luci_log.debug_verbose('AMC0: %s' % str(e))
 
 def manageCluster(self, clusterName, nodeList):
 	clusterName = str(clusterName)
-	luci_log.debug_verbose('manageCluster for %s' % clusterName)
 
 	try:
 		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/')
 		if not clusters:
 			raise Exception, 'cannot find the cluster entry in the DB'
-	except:
+	except Exception, e:
 		nodeUnauth(nodeList)
-		return 'Unable to create cluster \"' + clusterName + '\": the cluster directory is missing.'
+		luci_log.debug_verbose('MC0: %s: %s' % (clusterName, str(e)))
+		return 'Unable to create cluster %s: the cluster directory is missing.' % clusterName
 
 	try:
 		newCluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
 		if newCluster:
 			nodeUnauth(nodeList)
-			return 'A cluster named \"' + clusterName + '\" is already managed by Luci'
+			luci_log.debug_verbose('MC1: cluster %s: already exists' % clusterName)
+			return 'A cluster named %s is already managed by Luci' % clusterName
 	except:
 		pass
 
@@ -1063,20 +1112,22 @@
 		clusters.manage_addFolder(clusterName, '__luci__:cluster')
 		newCluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
 		if not newCluster:
-			raise Exception, 'unable to find cluster folder for %s' % clusterName
+			raise Exception, 'unable to create the cluster DB entry for %s' % clusterName
 	except Exception, e:
 		nodeUnauth(nodeList)
+		luci_log.debug_verbose('MC2: %s: %s' % (clusterName, str(e)))
 		return 'Unable to create cluster %s: %s' % (clusterName, str(e))
 
 	try:
 		newCluster.manage_acquiredPermissions([])
-		newCluster.manage_role('View', ['Access Contents Information','View'])
+		newCluster.manage_role('View', ['Access Contents Information', 'View'])
 	except Exception, e:
+		luci_log.debug_verbose('MC3: %s: %s' % (clusterName, str(e)))
 		nodeUnauth(nodeList)
 		try:
 			clusters.manage_delObjects([clusterName])
-		except:
-			pass
+		except Exception, e:
+			luci_log.debug_verbose('MC4: %s: %s' % (clusterName, str(e)))
 		return 'Unable to set permissions on new cluster: %s: %s' % (clusterName, str(e))
 
 	try:
@@ -1084,14 +1135,14 @@
 		if not cluster_os:
 			raise KeyError, 'Cluster OS is blank'
 	except KeyError, e:
-		luci_log.debug_verbose('Warning adding cluster %s: %s' \
-			% (clusterName, str(e)))
+		luci_log.debug_verbose('MC5: %s: %s' % (clusterName, str(e)))
 		cluster_os = 'rhel5'
 
 	try:
 		newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
-	except:
-		pass # we were unable to set the OS property string on this cluster
+	except Exception, e:
+		luci_log.debug_verbose('MC5: %s: %s: %s' \
+			% (clusterName, cluster_os, str(e)))
 
 	for i in nodeList:
 		#if 'ricci_host' in i:
@@ -1103,15 +1154,19 @@
 			newCluster.manage_addFolder(host, '__luci__:csystem:' + clusterName)
 			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
 			if not newSystem:
-				raise Exception, 'unable to create cluster system DB entry'
+				raise Exception, 'unable to create cluster system DB entry for node %s' % host
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
 			nodeUnauth(nodeList)
 			try:
 				clusters.manage_delObjects([clusterName])
-			except:
-				pass
+			except Exception, e:
+				luci_log.debug_verbose('MC6: %s: %s: %s' \
+					% (clusterName, host, str(e)))
+
+			luci_log.debug_verbose('MC7: %s: %s: %s' \
+				% (clusterName, host, str(e)))
 			return 'Unable to create cluster node %s for cluster %s: %s' \
 				% (host, clusterName, str(e))
 
@@ -1120,6 +1175,7 @@
 		if not ssystem:
 			raise Exception, 'The storage DB entry is missing'
 	except Exception, e:
+		luci_log.debug_verbose('MC8: %s: %s: %s' % (clusterName, host, str(e)))
 		return 'Error adding storage node %s: %s' % (host, str(e))
 
 	# Only add storage systems if the cluster and cluster node DB
@@ -1134,22 +1190,25 @@
 			# It's already there, as a storage system, no problem.
 			exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
 			continue
-		except: pass
+		except:
+			pass
 
 		try:
 			ssystem.manage_addFolder(host, '__luci__:system')
 			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
-		except: pass
+		except Exception, e:
+			luci_log.debug_verbose('MC9: %s: %s: %s' % (clusterName, host, str(e)))
 
 def createClusterSystems(self, clusterName, nodeList):
 	try:
 		clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
 		if not clusterObj:
 			raise Exception, 'cluster %s DB entry is missing' % clusterName
-	except:
+	except Exception, e:
 		nodeUnauth(nodeList)
+		luci_log.debug_verbose('CCS0: %s: %s' % (clusterName, str(e)))
 		return 'No cluster named \"' + clusterName + '\" is managed by Luci'
 
 	for i in nodeList:
@@ -1168,6 +1227,7 @@
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
 			nodeUnauth(nodeList)
+			luci_log.debug_verbose('CCS1: %s: %s: %s' % (clusterName, host, str(e)))
 			return 'Unable to create cluster node %s for cluster %s: %s' \
 				% (host, clusterName, str(e))
 
@@ -1176,8 +1236,7 @@
 		if not ssystem:
 			raise Exception, 'storage DB entry is missing'
 	except Exception, e:
-		luci_log.debug_verbose('Error: adding storage DB node for %s: %s' \
-			% (host, str(e)))
+		luci_log.debug_verbose('CCS2: %s: %s' % (clusterName, host, str(e)))
 		return
 
 	# Only add storage systems if the and cluster node DB
@@ -1192,14 +1251,16 @@
 			# It's already there, as a storage system, no problem.
 			exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
 			continue
-		except: pass
+		except:
+			pass
 
 		try:
 			ssystem.manage_addFolder(host, '__luci__:system')
 			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
-		except: pass
+		except Exception, e:
+			luci_log.debug_verbose('CCS3: %s: %s' % (clusterName, host, str(e)))
 
 def delSystem(self, systemName):
 	try:
@@ -1207,6 +1268,7 @@
 		if not ssystem:
 			raise Exception, 'storage DB entry is missing'
 	except Exception, e:
+		luci_log.debug_verbose('delSystem0: %s: %s' % (systemName, str(e)))
 		return 'Unable to find storage system %s: %s' % (systemName, str(e))
 
 	try:
@@ -1216,27 +1278,33 @@
 	except Exception, e:
 		try:
 			ssystem.manage_delObjects([systemName])
-		except:
-			return 'Unable to delete the storage system \"' + systemName + '\"'
-		luci_log.debug_verbose('ricci error for %s: %s' % (systemName, str(e)))
+		except Exception, e:
+			luci_log.debug_verbose('delSystem1: %s: %s' % (systemName, str(e)))
+			return 'Unable to delete the storage system %s' % systemName
+		luci_log.debug_verbose('delSystem2: %s: %s' % (systemName, str(e)))
 		return
 
 	# Only unauthenticate if the system isn't a member of
 	# a managed cluster.
 	cluster_info = rc.cluster_info()
 	if not cluster_info[0]:
-		try: rc.unauth()
-		except: pass
+		try:
+			rc.unauth()
+		except:
+			pass
 	else:
 		try:
 			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + cluster_info[0] + '/' + systemName)
 		except:
-			try: rc.unauth()
-			except: pass
+			try:
+				rc.unauth()
+			except:
+				pass
 
 	try:
 		ssystem.manage_delObjects([systemName])
 	except Exception, e:
+		luci_log.debug_verbose('delSystem3: %s: %s' % (systemName, str(e)))
 		return 'Unable to delete storage system %s: %s' \
 			% (systemName, str(e))
 
@@ -1244,9 +1312,10 @@
 	try:
 		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/')
 		if not clusters:
-			raise
-	except:
-		return 'Unable to find cluster \"' + clusterName + '\"'
+			raise Exception, 'clusters DB entry is missing'
+	except Exception, e:
+		luci_log.debug_verbose('delCluster0: %s' % str(e))
+		return 'Unable to find cluster %s' % clusterName
 
 	err = delClusterSystems(self, clusterName)
 	if err:
@@ -1254,26 +1323,28 @@
 
 	try:
 		clusters.manage_delObjects([clusterName])
-	except:
-		return 'Unable to delete cluster \"' + clusterName + '\"'
+	except Exception, e:
+		luci_log.debug_verbose('delCluster1: %s' % str(e))
+		return 'Unable to delete cluster %s' % clusterName
 
 def delClusterSystem(self, cluster, systemName):
 	try:
 		if not self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + systemName):
 			raise
 	except:
+		# It's not a storage system, so unauthenticate.
 		try:
 			rc = RicciCommunicator(systemName)
 			rc.unauth()
 		except Exception, e:
-			luci_log.debug_verbose('ricci error for %s: %s' \
+			luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %s' \
 				% (systemName, str(e)))
 
 	try:
 		cluster.manage_delObjects([systemName])
 	except Exception, e:
 		err_str = 'Error deleting cluster object %s: %s' % (systemName, str(e))
-		luci_log.debug_verbose(err_str)
+		luci_log.debug_verbose('delClusterSystem1: %s' % err_str)
 		return err_str
 
 def delClusterSystems(self, clusterName):
@@ -1285,7 +1356,7 @@
 	except Exception, e:
 		luci_log.debug_verbose('delCluSysterms: error for %s: %s' \
 			% (clusterName, str(e)))
-		return 'Unable to find any systems for cluster \"' + clusterName + '\"'
+		return 'Unable to find any systems for cluster %s' % clusterName
 
 	errors = ''
 	for i in csystems:
@@ -1297,34 +1368,65 @@
 def getDefaultUser(self, request):
 	try:
 		user = request.form['userList']
-	except:
+	except KeyError, e:
 		try:
 			user = request['user']
 		except:
 			try:
-				user = self.portal_membership.listMembers()[0].getUserName()
-			except:
+				members = list()
+				members.extend(self.portal_membership.listMembers())
+				members.sort()
+				user = members[0].getUserName()
+			except Exception, e:
+				luci_log.debug_verbose('getDefaultUser0: %s' % str(e))
 				user = None
 
+	if not user:
+		luci_log.debug_verbose('getDefaultUser1: user is none')
 	return user
 
 def getUserPerms(self):
 	perms = {}
-	for i in self.portal_membership.listMembers():
+
+	try:
+		members = list()
+		members.extend(self.portal_membership.listMembers())
+		if len(members) < 1:
+			raise Exception, 'no portal members exist'
+		members.sort()
+	except Exception, e:
+		luci_log.debug_verbose('getUserPerms0: %s' % str(e))
+		return {}
+
+	for i in members:
 		userName = i.getUserName()
 
 		perms[userName] = {}
 		perms[userName]['cluster'] = {}
 		perms[userName]['storage'] = {}
 
-		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
-		storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
+		try:
+			clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
+			storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
+		except Exception, e:
+			luci_log.debug_verbose('getUserPerms1: user %s: %s' % (userName, str(e)))
+			continue
 
 		for c in clusters:
-			perms[userName]['cluster'][c[0]] = i.has_role('View', c[1])
-
+			try:
+				perms[userName]['cluster'][c[0]] = i.has_role('View', c[1])
+			except Exception, e:
+				luci_log.debug_verbose('getUserPerms2: user %s, obj %s: %s' \
+					% (userName, c[0], str(e)))
+				continue
+				
 		for s in storage:
-			perms[userName]['storage'][s[0]] = i.has_role('View', s[1])
+			try:
+				perms[userName]['storage'][s[0]] = i.has_role('View', s[1])
+			except Exception, e:
+				luci_log.debug_verbose('getUserPerms2: user %s, obj %s: %s' \
+					% (userName, s[0], str(e)))
+				continue
 
 	return perms
 
@@ -1397,39 +1499,52 @@
 def getClusterNode(self, nodename, clustername):
 	try:
 		cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + str(clustername) + '/' + str(nodename))
+		if not cluster_node:
+			raise Exception, 'cluster node is none'
 		return cluster_node
-	except:
+	except Exception, e:
+		luci_log.debug_verbose('getClusterNode0: %s %s: %s' \
+			% (nodename, clustername, str(e)))
 		return None
 
 def getStorageNode(self, nodename):
 	try:
 		storage_node = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + '/' + str(nodename))
+		if not storage_node:
+			raise Exception, 'storage node is none'
 		return storage_node
-	except:
+	except Exception, e:
+		luci_log.debug_verbose('getStorageNode0: %s: %s' % (nodename, str(e)))
 		return None
 
 def testNodeFlag(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
+		if flags is None:
+			return False
 		return flags & flag_mask != 0
-	except:
-		pass
+	except Exception, e:
+		luci_log.debug_verbose('testNodeFlag0: %s' % str(e))
 	return False
 
 def setNodeFlag(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
+		if flags is None:
+			flags = 0
 		node.manage_changeProperties({ 'flags': flags | flag_mask })
 	except:
 		try:
 			node.manage_addProperty('flags', flag_mask, 'int')
-		except:
-			pass
+		except Exception, e:
+			luci_log.debug_verbose('setNodeFlag0: %s' % str(e))
 
 def delNodeFlag(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
+		if flags is None:
+			return
 		if flags & flag_mask != 0:
 			node.manage_changeProperties({ 'flags': flags & ~flag_mask })
-	except:
-		pass
+	except Exception, e:
+		luci_log.debug_verbose('delNodeFlag0: %s' % str(e))
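
The three helpers at the end of the hunk treat the node's 'flags' property as an integer bitmask, and the new None checks make a missing property behave like zero. A standalone Python 2 sketch of the same test/set/clear operations, with a dict standing in for the Zope node object and a hypothetical mask value for CLUSTER_NODE_NEED_AUTH:

    CLUSTER_NODE_NEED_AUTH = 1 << 0   # hypothetical mask value

    def test_flag(node, mask):
        flags = node.get('flags')
        if flags is None:
            return False
        return flags & mask != 0

    def set_flag(node, mask):
        flags = node.get('flags')
        if flags is None:
            flags = 0                 # missing property acts as 0
        node['flags'] = flags | mask

    def del_flag(node, mask):
        flags = node.get('flags')
        if flags is not None and flags & mask != 0:
            node['flags'] = flags & ~mask

    node = {}
    set_flag(node, CLUSTER_NODE_NEED_AUTH)
    print test_flag(node, CLUSTER_NODE_NEED_AUTH)   # True
    del_flag(node, CLUSTER_NODE_NEED_AUTH)
    print test_flag(node, CLUSTER_NODE_NEED_AUTH)   # False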



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-31 17:18 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-31 17:18 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-31 17:18:14

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           ricci_communicator.py 

Log message:
	fix node flags brokenness

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.134&r2=1.135
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.15&r2=1.16

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/31 00:16:14	1.134
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/31 17:18:12	1.135
@@ -2898,31 +2898,35 @@
       try:
         cluname = req.form['clusterName']
       except:
-        luci_log.debug_verbose('No cluster name -- returning empty map')
+        luci_log.debug_verbose('ICB0: No cluster name -- returning empty map')
         return map
 
-  path = CLUSTER_FOLDER_PATH + cluname
+  path = str(CLUSTER_FOLDER_PATH + cluname)
   try:
-    clusterfolder = self.restrictedTraverse(str(path))
+    clusterfolder = self.restrictedTraverse(path)
     if not clusterfolder:
       raise Exception, 'clusterfolder is None'
   except Exception, e:
-    luci_log.debug_verbose('cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
+    luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
     return map
   except:
-    luci_log.debug_verbose('cluster %s [%s] folder missing: returning empty map' % (cluname, path))
+    luci_log.debug_verbose('ICB2: cluster %s [%s] folder missing: returning empty map' % (cluname, path))
 
   try:
     items = clusterfolder.objectItems('ManagedSystem')
     if not items or len(items) < 1:
+      luci_log.debug_verbose('ICB3: no flags at %s for cluster %s' \
+          % (cluname, path))
       return map  #This returns an empty map, and should indicate not busy
   except Exception, e:
-    luci_log.debug('An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
+    luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
     return map
   except:
-    luci_log.debug('An error occurred while looking for cluster %s flags at path %s' % (cluname, path))
+    luci_log.debug('ICB5: An error occurred while looking for cluster %s flags@path %s' % (cluname, path))
     return map
-    
+
+  luci_log.debug_verbose('ICB6: isClusterBusy: %s is busy: %d flags' \
+      % (cluname, len(items)))
   map['busy'] = "true"
   #Ok, here is what is going on...if there is an item,
   #we need to call the ricci_bridge and get a batch report.
@@ -2952,31 +2956,58 @@
       batch_xml = None
       ricci = item[0].split("____") #This removes the 'flag' suffix
 
+      luci_log.debug_verbose('ICB6A: using host %s for rc for item %s' \
+          % (ricci[0], item[0]))
       try:
         rc = RicciCommunicator(ricci[0])
+        if not rc:
+          rc = None
+          raise RicciError, 'rc is None for %s' % ricci[0]
       except RicciError, e:
         rc = None
-        luci_log.debug_verbose('ricci returned error in iCB for %s: %s' \
+        luci_log.debug_verbose('ICB7: ricci returned error in iCB for %s: %s' \
           % (cluname, str(e)))
       except:
         rc = None
-        luci_log.info('ricci connection failed for cluster %s' % cluname)
+        luci_log.info('ICB8: ricci connection failed for cluster %s' % cluname)
 
+      batch_id = None
       if rc is not None:
         try:
-          batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
-          if batch_xml != None:
-            (creation_status, total) = batch_status(batch_xml)
-          else:
-            luci_log.debug_verbose('batch report for cluster %s, item %s is None' % (cluname, item[0]))
-        except:
-          creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
-          batch_xml = "bloody_failure" #set to avoid next if statement
-      else:
+          batch_id = item[1].getProperty(BATCH_ID)
+          luci_log.debug_verbose('ICB8A: got batch_id %s from %s' \
+              % (batch_id, item[0]))
+        except Exception, e:
+          try:
+            luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %s' \
+                % (item[0], str(e)))
+          except:
+            luci_log.debug_verbose('ICB8C: failed to get batch_id from %s' % item[0])
+
+        if batch_id is not None:
+          try:
+            batch_xml = rc.batch_report(batch_id)
+            if batch_xml is not None:
+              luci_log.debug_verbose('ICB8D: batch_xml for %s from batch_report is not None -- getting batch status' % batch_id)
+              (creation_status, total) = batch_status(batch_xml)
+              try:
+                luci_log.debug_verbose('ICB8E: batch status returned (%d,%d)' \
+                    % (creation_status, total))
+              except:
+                luci_log.debug_verbose('ICB8F: error logging batch status return')
+            else:
+              luci_log.debug_verbose('ICB9: batch_xml for cluster is None')
+          except Exception, e:
+            luci_log.debug_verbose('ICB9A: error getting batch_xml from rc.batch_report: %s' % str(e))
+            creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
+            batch_xml = "bloody_failure" #set to avoid next if statement
+
+      if rc is None or batch_id is None:
+          luci_log.debug_verbose('ICB12: unable to connect to a ricci agent for cluster %s to get batch status')
           creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
-          batch_xml = "bloody_failure" #set to avoid next if statement
+          batch_xml = "bloody_bloody_failure" #set to avoid next if statement
 
-      if batch_xml == None:  #The job is done and gone from queue
+      if batch_xml is None:  #The job is done and gone from queue
         if redirect_message == False: #We have not displayed this message yet
           node_report['desc'] = REDIRECT_MSG
           node_report['iserror'] = True 
@@ -2984,7 +3015,7 @@
           nodereports.append(node_report)
           redirect_message = True
 
-        luci_log.debug_verbose('batch job is done -- deleting %s' % item[0])
+        luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
         clusterfolder.manage_delObjects(item[0])
         continue
 
@@ -3038,7 +3069,7 @@
           try:
               clusterfolder.manage_delObjects(item[0])
           except Exception, e:
-              luci_log.info('Unable to delete %s: %s' % (item[0], str(e)))
+              luci_log.info('ICB14: Unable to delete %s: %s' % (item[0], str(e)))
           continue
         else:
           map['busy'] = "true"
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/10/31 13:16:30	1.15
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/10/31 17:18:12	1.16
@@ -221,7 +221,7 @@
         return doc
 
     def batch_report(self, batch_id):
-        luci_log.debug_verbose('[auth=%d] asking for batchid# %d for host %s' \
+        luci_log.debug_verbose('[auth=%d] asking for batchid# %s for host %s' \
             % (self.__authed, batch_id, self.__hostname))
 
         if not self.authed():
@@ -244,7 +244,7 @@
         if doc.firstChild.getAttribute('success') == '12':
             return None
         if doc.firstChild.getAttribute('success') != '0':
-            raise RicciError, 'Error while retrieving batch report for batch #%s from host %s' % (batch_id, self.__hostname)
+            raise RicciError, 'Error while retrieving batch report for batch #%d from host %s' % (batch_id, self.__hostname)
         batch_node = None
         for node in doc.firstChild.childNodes:
             if node.nodeType == xml.dom.Node.ELEMENT_NODE:
@@ -403,10 +403,10 @@
                     last = last + 1
                     last = last - 2 * last
     try:
-        luci_log.debug_verbose('Returning (%s, %s) for batch_status(\"%s\")' \
+        luci_log.debug_verbose('Returning (%d, %d) for batch_status(\"%s\")' \
             % (last, total, batch_xml.toxml()))
     except:
-        pass
+        luci_log.debug_verbose('Returning last, total')
 
     return (last, total)
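
The reworked isClusterBusy() loop reads BATCH_ID from the flag object first, only then asks the agent for a batch report, and treats a missing report (batch_xml is None) as "job done, reap the flag". (As committed, the ICB12 message never fills its '%s' placeholder, so the literal placeholder text is logged.) A minimal Python 2 sketch of the poll-and-reap cycle; report() is a hypothetical stand-in for rc.batch_report(), and a dict stands in for the per-cluster flag objects:

    queue = {'node1____flag': 'batch-42', 'node2____flag': 'batch-43'}
    agent = {'batch-42': (3, 5)}   # batch-43 already left the queue

    def report(batch_id):
        # rc.batch_report() returns None once the job is gone.
        return agent.get(batch_id)

    for flag_name, batch_id in queue.items():
        batch_xml = report(batch_id)
        if batch_xml is None:
            # Done and gone from the queue: delete the flag object.
            print 'batch job is done -- deleting %s' % flag_name
        else:
            creation_status, total = batch_xml
            print '%s: %d of %d steps done' \
                % (flag_name, creation_status, total)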
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-31  0:16 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-31  0:16 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-31 00:16:15

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	more logging and exception robustness

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.133&r2=1.134

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/30 22:52:00	1.133
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/31 00:16:14	1.134
@@ -1637,6 +1637,7 @@
 		try:
 			svcname = req.form['servicename']
 		except:
+			luci_log.debug_verbose('serviceStart error: no service name')
 			return None
 
 	try:
@@ -1645,22 +1646,28 @@
 		try:
 			nodename = req.form['nodename']
 		except:
-			return None
+			nodename = None
 
+	cluname = None
 	try:
 		cluname = req['clustername']
 	except KeyError, e:
 		try:
-			cluname = req.form['clusterName']
+			cluname = req.form['clustername']
 		except:
-			return None
+			pass
+
+	if cluname is None:
+		luci_log.debug_verbose('serviceStart error: %s no service name' \
+			% svcname)
+		return None
 
 	ricci_agent = rc.hostname()
 
 	batch_number, result = startService(rc, svcname, nodename)
-	if not batch_number or not result:
-		luci_log.debug_verbose('startService %s @ %s call failed' \
-			% (svcname, nodename))
+	if batch_number is None or result is None:
+		luci_log.debug_verbose('startService %s call failed' \
+			% svcname)
 		return None
 
 	#Now we need to create a DB flag for this system.
@@ -1704,17 +1711,14 @@
 		try:
 			cluname = req.form['clustername']
 		except:
-			try:
-				cluname = rc.cluster_info()[0]
-			except:
-				pass
+			pass
 
 	if cluname is None:
 		luci_log.debug_verbose('unable to determine cluser name for serviceRestart %s' % svcname)
 		return None
 
 	batch_number, result = restartService(rc, svcname)
-	if not batch_number or not result:
+	if batch_number is None or result is None:
 		luci_log.debug_verbose('restartService for %s failed' % svcname)
 		return None
 				
@@ -1762,17 +1766,14 @@
 		try:
 			cluname = req.form['clustername']
 		except:
-			try:
-				cluname = rc.cluster_info()[0]
-			except:
-				pass
+			pass
 
 	if cluname is None:
 		luci_log.debug_verbose('unable to determine cluser name for serviceStop %s' % svcname)
 		return None
 
 	batch_number, result = stopService(rc, svcname)
-	if not batch_number or not result:
+	if batch_number is None or result is None:
 		luci_log.debug_verbose('stopService for %s failed' % svcname)
 		return None
 
@@ -2097,7 +2098,7 @@
 		clustername = request['clustername']
 	except KeyError, e:
 		try:
-			clustername = request.form['clusterName']
+			clustername = request.form['clustername']
 		except:
 			luci_log.debug('missing cluster name for NTP')
 			return None
@@ -2194,16 +2195,20 @@
 			return None
 
 		batch_number, result = nodeLeaveCluster(rc)
-		batch_id = str(batch_number)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeLeaveCluster error: batch_number and/or result is None')
+			return None
 
+		batch_id = str(batch_number)
 		objpath = str(path + "/" + objname)
+
 		try:
 			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
 			#Now we need to annotate the new DB object
 			flag = self.restrictedTraverse(objpath)
 			flag.manage_addProperty(BATCH_ID, batch_id, "string")
-			flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
-			flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+			flag.manage_addProperty(TASKTYPE, NODE_LEAVE_CLUSTER, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' leaving cluster", "string")
 		except:
 			luci_log.debug('An error occurred while setting flag %s' % objpath)
 
@@ -2212,34 +2217,52 @@
 		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 	elif task == NODE_JOIN_CLUSTER:
 		batch_number, result = nodeJoinCluster(rc)
-		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-		nodefolder = self.restrictedTraverse(path)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeJoin error: batch_number and/or result is None')
+			return None
+
+		path = str(CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved)
 		batch_id = str(batch_number)
-		objname = nodename_resolved + "____flag"
-		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = path + "/" + objname
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
-		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
+		objname = str(nodename_resolved + "____flag")
+		objpath = str(path + "/" + objname)
+
+		try:
+			nodefolder = self.restrictedTraverse(path)
+			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_JOIN_CLUSTER, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' joining cluster", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeJoin error: creating flags at %s: %s' \
+				% (path, str(e)))
 
 		response = request.RESPONSE
 		#Once again, is this correct? Should we re-direct to the cluster page?
 		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 	elif task == NODE_REBOOT:
 		batch_number, result = nodeReboot(rc)
-		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-		nodefolder = self.restrictedTraverse(path)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeReboot: batch_number and/or result is None')
+			return None
+
+		path = str(CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved)
 		batch_id = str(batch_number)
-		objname = nodename_resolved + "____flag"
-		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = path + "/" + objname
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID, batch_id, "string")
-		flag.manage_addProperty(TASKTYPE, NODE_REBOOT, "string")
-		flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being rebooted", "string")
+		objname = str(nodename_resolved + "____flag")
+		objpath = str(path + "/" + objname)
+
+		try:
+			nodefolder = self.restrictedTraverse(path)
+			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_REBOOT, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being rebooted", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeReboot err: creating flags at %s: %s' \
+				% (path, str(e)))
 
 		response = request.RESPONSE
 		#Once again, is this correct? Should we re-direct to the cluster page?
@@ -2250,16 +2273,19 @@
 		try:
 			clusterfolder = self.restrictedTraverse(path)
 			if not clusterfolder:
-				raise
-		except:
-			luci_log.debug('The cluster folder for %s could not be found.' \
-				 % clustername)
+				raise Exception, 'no cluster folder at %s' % path
+		except Exception, e:
+			luci_log.debug('The cluster folder for %s could not be found: %s' \
+				 % (clustername, str(e)))
 			return None
 
 		try:
 			nodes = clusterfolder.objectItems('Folder')
-		except:
-			luci_log.debug('No cluster nodes for %s were found' % clustername)
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes'
+		except Exception, e:
+			luci_log.debug('No cluster nodes for %s were found: %s' \
+				% (clustername, str(e)))
 			return None
 
 		found_one = False
@@ -2299,17 +2325,26 @@
 			return None
 
 		batch_number, result = nodeFence(rc, nodename)
-		path = path + "/" + nodename_resolved
-		nodefolder = self.restrictedTraverse(path)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeFence: batch_number and/or result is None')
+			return None
+
+		path = str(path + "/" + nodename_resolved)
 		batch_id = str(batch_number)
-		objname = nodename_resolved + "____flag"
-		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = path + "/" + objname
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
-		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
+		objname = str(nodename_resolved + "____flag")
+		objpath = str(path + "/" + objname)
+
+		try:
+			nodefolder = self.restrictedTraverse(path)
+			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_FENCE, "string")
+			flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being fenced", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeFence err: creating flags at %s: %s' \
+				% (path, str(e)))
 
 		response = request.RESPONSE
 		#Once again, is this correct? Should we re-direct to the cluster page?
@@ -2320,17 +2355,25 @@
 		#and propogate it. We will need two ricci agents for this task.
 
 		# Make sure we can find a second node before we hose anything.
-		path = CLUSTER_FOLDER_PATH + clustername
+		path = str(CLUSTER_FOLDER_PATH + clustername)
 		try:
 			clusterfolder = self.restrictedTraverse(path)
 			if not clusterfolder:
-				raise
-		except:
+				raise Exception, 'no cluster folder at %s' % path
+		except Exception, e:
+			luci_log.debug_verbose('node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
 			return None
 
-		nodes = clusterfolder.objectItems('Folder')
-		found_one = False
+		try:
+			nodes = clusterfolder.objectItems('Folder')
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes in DB'
+		except Exception, e:
+			luci_log.debug_verbose('node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
 
+		found_one = False
 		for node in nodes:
 			if node[1].getId().find(nodename) != (-1):
 				continue
@@ -2339,38 +2382,59 @@
 			# in the cluster we believe it is.
 			try:
 				rc2 = RicciCommunicator(node[1].getId())
-				if not rc2.authed():
-					# set the flag
-					rc2 = None
-				if not rc2:
-					raise
-				found_one = True
-				break
+			except Exception, e:
+				luci_log.info('ricci %s error: %s' % (node[0], str(e)))
+				continue
 			except:
 				continue
 
+			if not rc2.authed():
+				try:
+					setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				try:
+					snode = getStorageNode(self, node[0])
+					setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				luci_log.debug_verbose('%s is not authed' % node[0])
+				rc2 = None
+				continue
+			else:
+				found_one = True
+				break
+
 		if not found_one:
+			luci_log.debug_verbose('unable to find ricci node to delete %s from %s' % (nodename, clustername))
 			return None
 
 		#First, delete cluster.conf from node to be deleted.
 		#next, have node leave cluster.
 		batch_number, result = nodeLeaveCluster(rc, purge=True)
+		if batch_number is None or result is None:
+			luci_log.debug_verbose('nodeDelete: batch_number and/or result is None')
+			return None
 
 		#It is not worth flagging this node in DB, as we are going
 		#to delete it anyway. Now, we need to delete node from model
 		#and send out new cluster.conf
 		delete_target = None
-		try:
-			nodelist = model.getNodes()
-			find_node = lower(nodename)
-			for n in nodelist:
+		nodelist = model.getNodes()
+		find_node = lower(nodename)
+		for n in nodelist:
+			try:
 				if lower(n.getName()) == find_node:
 					delete_target = n
 					break
-		except:
-			pass
+			except:
+				continue
 
 		if delete_target is None:
+			luci_log.debug_verbose('unable to find delete target for %s in %s' \
+				% (nodename, clustername))
 			return None
 
 		model.deleteNode(delete_target)
@@ -2386,6 +2450,7 @@
 		# propagate the new cluster.conf via the second node
 		batch_number, result = setClusterConf(rc2, str(str_buf))
 		if batch_number is None:
+			luci_log.debug_verbose('batch number is None after del node in NTP')
 			return None
 
 		#Now we need to delete the node from the DB
@@ -2396,19 +2461,24 @@
 			delnode = self.restrictedTraverse(del_path)
 			clusterfolder = self.restrictedTraverse(path)
 			clusterfolder.manage_delObjects(delnode[0])
-		except:
-			# XXX - we need to handle this
-			pass
+		except Exception, e:
+			luci_log.debug_verbose('error deleting %s: %s' % (del_path, str(e)))
 
 		batch_id = str(batch_number)
 		objname = str(nodename_resolved + "____flag")
-		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
 		objpath = str(path + "/" + objname)
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_DELETE, "string")
-		flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
+
+		try:
+			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE, NODE_DELETE, "string")
+			flag.manage_addProperty(FLAG_DESC, "Deleting node \'" + nodename + "\'", "string")
+		except Exception, e:
+			luci_log.debug_verbose('nodeDelete %s err setting flag@%s: %s' \
+				% (nodename, objpath, str(e)))
+
 		response = request.RESPONSE
 		response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
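
The create-flag sequence above (addManagedSystem followed by three manage_addProperty calls wrapped in try/except) recurs in nearly every handler in this file. A minimal sketch of how it could be factored into one helper; the helper name is hypothetical, while the Zope calls and the BATCH_ID, TASKTYPE and FLAG_DESC constants are the ones used above:

    def set_task_flag(self, path, agent, batch_id, task_type, desc):
        # create a ManagedSystem flag object under the cluster folder
        # and annotate it with the batch id, task type and description
        objname = str(agent + '____flag')
        objpath = str(path + '/' + objname)
        try:
            folder = self.restrictedTraverse(path)
            folder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
            flag = self.restrictedTraverse(objpath)
            flag.manage_addProperty(BATCH_ID, str(batch_id), 'string')
            flag.manage_addProperty(TASKTYPE, task_type, 'string')
            flag.manage_addProperty(FLAG_DESC, desc, 'string')
        except Exception, e:
            luci_log.debug_verbose('error creating flag at %s: %s' \
                % (objpath, str(e)))

Each call site would then reduce to a single line, for example set_task_flag(self, path, nodename_resolved, batch_number, NODE_FENCE, desc).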
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-30 22:52 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-30 22:52 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-30 22:52:00

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	jim's fix for bz 213057

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.132&r2=1.133

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/27 18:24:05	1.132
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/30 22:52:00	1.133
@@ -744,9 +744,9 @@
   if request.REQUEST_METHOD == 'POST':
     ret = validatePost(self, request)
     try:
-		request.SESSION.set('checkRet', ret[1])
+      request.SESSION.set('checkRet', ret[1])
     except:
-		request.SESSION.set('checkRet', {})
+      request.SESSION.set('checkRet', {})
   else:
     try: request.SESSION.set('checkRet', {})
     except: pass
@@ -780,8 +780,8 @@
   else:
     cldata['currentItem'] = False
 
-
-  if havePermCreateCluster(self):
+  UserHasPerms = havePermCreateCluster(self)
+  if UserHasPerms:
     cladd = {}
     cladd['Title'] = "Create a New Cluster"
     cladd['cfg_type'] = "clusteradd"
@@ -832,7 +832,8 @@
 
   mylist = list()
   mylist.append(cldata)
-  mylist.append(cladd)
+  if UserHasPerms:
+    mylist.append(cladd)
   mylist.append(clcfg)
   dummynode['children'] = mylist
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-30 20:43 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-30 20:43 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	jparsons at sourceware.org	2006-10-30 20:43:25

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix unbound local error

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.6&r2=1.120.2.7

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/25 01:53:34	1.120.2.6
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/30 20:43:25	1.120.2.7
@@ -760,8 +760,8 @@
   else:
     cldata['currentItem'] = False
 
-
-  if havePermCreateCluster(self):
+  UserHasPerms = havePermCreateCluster(self)
+  if UserHasPerms:
     cladd = {}
     cladd['Title'] = "Create a New Cluster"
     cladd['cfg_type'] = "clusteradd"
@@ -812,7 +812,8 @@
 
   mylist = list()
   mylist.append(cldata)
-  mylist.append(cladd)
+  if UserHasPerms:
+    mylist.append(cladd)
   mylist.append(clcfg)
   dummynode['children'] = mylist
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-27  1:11 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-27  1:11 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-27 01:11:16

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	more logging for debugging

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.130&r2=1.131

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/26 22:59:13	1.130
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/27 01:11:16	1.131
@@ -1645,6 +1645,7 @@
 			nodename = req.form['nodename']
 		except:
 			return None
+
 	try:
 		cluname = req['clustername']
 	except KeyError, e:
@@ -1656,79 +1657,147 @@
 	ricci_agent = rc.hostname()
 
 	batch_number, result = startService(rc, svcname, nodename)
-	#Now we need to create a DB flag for this system.
+	if not batch_number or not result:
+		luci_log.debug_verbose('startService %s @ %s call failed' \
+			% (svcname, nodename))
+		return None
 
-	path = CLUSTER_FOLDER_PATH + cluname
-	clusterfolder = self.restrictedTraverse(path)
+	#Now we need to create a DB flag for this system.
+	path = str(CLUSTER_FOLDER_PATH + cluname)
 	batch_id = str(batch_number)
-	objname = ricci_agent + "____flag"
-	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-	#Now we need to annotate the new DB object
-	objpath = path + "/" + objname
-	flag = self.restrictedTraverse(objpath)
-	#flag[BATCH_ID] = batch_id
-	#flag[TASKTYPE] = SERVICE_START
-	#flag[FLAG_DESC] = "Starting service " + svcname
-	flag.manage_addProperty(BATCH_ID,batch_id, "string")
-	flag.manage_addProperty(TASKTYPE,SERVICE_START, "string")
-	flag.manage_addProperty(FLAG_DESC,"Starting service \'" + svcname + "\'", "string")
+	objname = str(ricci_agent + "____flag")
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = str(path + "/" + objname)
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, SERVICE_START, "string")
+		flag.manage_addProperty(FLAG_DESC, "Starting service \'" + svcname + "\'", "string")
+	except Exception, e:
+		luci_log.debug_verbose('Error creating flag at %s: %s' % (objpath, str(e)))
+
 	response = req.RESPONSE
 	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def serviceRestart(self, rc, req):
-  svcname = req['servicename']
-  batch_number, result = restartService(rc, svcname)
+	try:
+		svcname = req['servicename']
+	except KeyError, e:
+		try:
+			svcname = req.form['servicename']
+		except:
+			luci_log.debug_verbose('no service name for serviceRestart')
+			return None
+	except:
+		luci_log.debug_verbose('no service name for serviceRestart')
+		return None
 
-  ricci_agent = rc.hostname()
-  #Now we need to create a DB flag for this system.
-  cluname = req['clustername']
+	#Now we need to create a DB flag for this system.
+	cluname = None
+	try:
+		cluname = req['clustername']
+	except:
+		try:
+			cluname = req.form['clustername']
+		except:
+			try:
+				cluname = rc.cluster_info()[0]
+			except:
+				pass
 
-  path = CLUSTER_FOLDER_PATH + cluname
-  clusterfolder = self.restrictedTraverse(path)
-  batch_id = str(batch_number)
-  objname = ricci_agent + "____flag"
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-  #Now we need to annotate the new DB object
-  objpath = path + "/" + objname
-  flag = self.restrictedTraverse(objpath)
-  #flag[BATCH_ID] = batch_id
-  #flag[TASKTYPE] = SERVICE_RESTART
-  #flag[FLAG_DESC] = "Restarting service " + svcname
-  flag.manage_addProperty(BATCH_ID,batch_id, "string")
-  flag.manage_addProperty(TASKTYPE,SERVICE_RESTART, "string")
-  flag.manage_addProperty(FLAG_DESC,"Restarting service " + svcname, "string")
+	if cluname is None:
+		luci_log.debug_verbose('unable to determine cluster name for serviceRestart %s' % svcname)
+		return None
+
+	batch_number, result = restartService(rc, svcname)
+	if not batch_number or not result:
+		luci_log.debug_verbose('restartService for %s failed' % svcname)
+		return None
+
+	ricci_agent = rc.hostname()
+
+	path = str(CLUSTER_FOLDER_PATH + cluname)
+	batch_id = str(batch_number)
+	objname = str(ricci_agent + "____flag")
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
 
-  response = req.RESPONSE
-  response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+		#Now we need to annotate the new DB object
+		objpath = str(path + "/" + objname)
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, SERVICE_RESTART, "string")
+		flag.manage_addProperty(FLAG_DESC, "Restarting service " + svcname, "string")
+	except Exception, e:
+		luci_log.debug_verbose('Error creating flag in restartService %s: %s' \
+			% (svcname, str(e)))
+
+	response = req.RESPONSE
+	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def serviceStop(self, rc, req):
-  svcname = req['servicename']
-  batch_number, result = stopService(rc, svcname)
+	try:
+		svcname = req['servicename']
+	except KeyError, e:
+		try:
+			svcname = req.form['servicename']
+		except:
+			luci_log.debug_verbose('no service name for serviceStop')
+			return None
+	except:
+		luci_log.debug_verbose('no service name for serviceStop')
+		return None
+
+	#Now we need to create a DB flag for this system.
+	cluname = None
+	try:
+		cluname = req['clustername']
+	except:
+		try:
+			cluname = req.form['clustername']
+		except:
+			try:
+				cluname = rc.cluster_info()[0]
+			except:
+				pass
 
-  #Now we need to create a DB flag for this system.
-  cluname = req['clustername']
+	if cluname is None:
+		luci_log.debug_verbose('unable to determine cluster name for serviceStop %s' % svcname)
+		return None
 
-  ricci_agent = rc.hostname()
+	batch_number, result = stopService(rc, svcname)
+	if not batch_number or not result:
+		luci_log.debug_verbose('stopService for %s failed' % svcname)
+		return None
 
-  path = CLUSTER_FOLDER_PATH + cluname
-  clusterfolder = self.restrictedTraverse(path)
-  batch_id = str(batch_number)
-  objname = ricci_agent + "____flag"
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-  #Now we need to annotate the new DB object
-  objpath = path + "/" + objname
-  flag = self.restrictedTraverse(objpath)
-  #flag[BATCH_ID] = batch_id
-  #flag[TASKTYPE] = SERVICE_STOP
-  #flag[FLAG_DESC] = "Stopping service " + svcname
-  flag.manage_addProperty(BATCH_ID,batch_id,"string")
-  flag.manage_addProperty(TASKTYPE,SERVICE_STOP, "string")
-  flag.manage_addProperty(FLAG_DESC,"Stopping service " + svcname,"string")
+	ricci_agent = rc.hostname()
 
-  time.sleep(2)
+	path = str(CLUSTER_FOLDER_PATH + cluname)
+	batch_id = str(batch_number)
+	objname = str(ricci_agent + "____flag")
 
-  response = req.RESPONSE
-  response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		objpath = str(path + "/" + objname)
+		flag = self.restrictedTraverse(objpath)
+
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, SERVICE_STOP, "string")
+		flag.manage_addProperty(FLAG_DESC, "Stopping service " + svcname, "string")
+		time.sleep(2)
+	except Exception, e:
+		luci_log.debug_verbose('Error creating flags for stopService %s: %s' \
+			% (svcname, str(e)))
+
+	response = req.RESPONSE
+	response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
 def getFdomsInfo(self, modelb, request, clustatus):
   slist = list()
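
Both rewritten handlers resolve the cluster name through the same three-step fallback: the request mapping, then the POST form, then the ricci agent's own report. A condensed sketch of that lookup order (the helper name is hypothetical; req, req.form and rc.cluster_info() are the objects used above):

    def get_cluster_name(req, rc):
        # 1. URL/request variable, 2. form field, 3. ask the ricci agent
        try:
            return req['clustername']
        except:
            pass
        try:
            return req.form['clustername']
        except:
            pass
        try:
            return rc.cluster_info()[0]
        except:
            return None

Returning None lets the caller log one message and bail out instead of raising KeyError mid-handler.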



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-25  0:43 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-25  0:43 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-25 00:43:48

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	fix logging

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.127&r2=1.128
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.32&r2=1.33

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/24 15:05:28	1.127
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/25 00:43:48	1.128
@@ -2594,27 +2594,43 @@
 		try:
 			nodename = request.form['nodename']
 		except:
-			return "Unable to resolve node name %s to retrieve logging information" % nodename
+			luci_log.debug_verbose('Unable to get node name to retrieve logging information')
+			return 'Unable to get node name to retrieve logging information'
 
+	clustername = None
 	try:
 		clustername = request['clustername']
 	except KeyError, e:
 		try:
 			clustername = request.form['clusterName']
+			if not clustername:
+				raise
 		except:
-			return "Unable to resolve node name %s to retrieve logging information" % nodename
-
-	try:
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
+			clustername = None
+			luci_log.debug_verbose('Unable to find cluster name while retrieving logging information for %s' % nodename)
 	except:
-		return "Unable to resolve node name %s to retrieve logging information" % nodename
+		pass
+
+	if clustername is None:
+		nodename_resolved = nodename
+	else:
+		try:
+			nodename_resolved = resolve_nodename(self, clustername, nodename)
+		except:
+			luci_log.debug_verbose('Unable to resolve node name %s/%s to retrieve logging information' \
+				% (nodename, clustername))
+			return 'Unable to resolve node name for %s in cluster %s' % (nodename, clustername)
 
 	try:
 		rc = RicciCommunicator(nodename_resolved)
-		if not rc:
-			raise
-	except:
-		return "Unable to resolve node name %s to retrieve logging information" % nodename_resolved
+	except RicciError, e:
+		luci_log.debug_verbose('Ricci error while getting logs for %s: %s' \
+			% (nodename_resolved, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+	except:
+		luci_log.debug_verbose('Unexpected exception while getting logs for %s' \
+			% nodename_resolved)
+		return 'Ricci error while getting logs for %s' % nodename_resolved
 
 	if not rc.authed():
 		try:
@@ -2622,7 +2638,15 @@
 			setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
 		except:
 			pass
-		return "Luci is not authenticated to node %s. Please reauthenticate first." % nodename
+
+		if clustername:
+			try:
+				cnode = getClusterNode(self, nodename, clustername)
+				setNodeFlag(cnode, CLUSTER_NODE_NEED_AUTH)
+			except:
+				pass
+
+		return 'Luci is not authenticated to node %s. Please reauthenticate first.' % nodename
 
 	return getNodeLogs(rc)
 
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/23 19:24:39	1.32
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/25 00:43:48	1.33
@@ -1,4 +1,5 @@
 import xml
+from time import time, ctime
 from xml.dom import minidom
 from ricci_communicator import RicciCommunicator
 
@@ -284,10 +285,52 @@
 	batch_str = '<module name="log"><request sequence="1254" API_version="1.0"><function_call name="get"><var mutable="false" name="age" type="int" value="18000"/><var mutable="false" name="tags" type="list_str"><listentry value="cluster"/></var></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str, async=False)
-	doc = getPayload(ricci_xml)
-	if not doc or not doc.firstChild:
+	if not ricci_xml:
 		return errstr
-	return doc.firstChild
+	try:
+		log_entries = ricci_xml.getElementsByTagName('logentry')
+		if not log_entries or len(log_entries) < 1:
+			raise Exception, 'no log data is available.'
+	except Exception, e:
+		luci_log.debug('Error retrieving log data from %s: %s' \
+			% (rc.hostname(), str(e)))
+		return None
+	time_now = time()
+	entry = ''
+	for i in log_entries:
+		try:
+			log_msg = i.getAttribute('msg')
+		except:
+			log_msg = ''
+
+		if not log_msg:
+			continue
+
+		try:
+			log_age = int(i.getAttribute('age'))
+		except:
+			log_age = 0
+
+		try:
+			log_domain = i.getAttribute('domain')
+		except:
+			log_domain = ''
+
+		try:
+			log_pid = i.getAttribute('pid')
+		except:
+			log_pid = ''
+
+		if log_age:
+			entry += ctime(time_now - log_age) + ' '
+		if log_domain:
+			entry += log_domain
+		if log_pid:
+			entry += '[' + log_pid + ']' + ': '
+		else:
+			entry += ': '
+		entry += log_msg + '<br/>'
+	return entry
 
 def nodeReboot(rc):
 	batch_str = '<module name="reboot"><request sequence="111" API_version="1.0"><function_call name="reboot_now"/></request></module>'



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-24 14:08 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-24 14:08 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-24 14:08:50

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	more logging/exception code for debugging

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.125&r2=1.126

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/20 22:29:22	1.125
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/24 14:08:50	1.126
@@ -443,7 +443,7 @@
 		try:
 			resObj = resourceAddHandler[res_type](self, dummy_form)
 		except:
-			luci_log
+			luci_log('res type %d is invalid' % res_type)
 			resObj = None
 
 		if resObj is None:
@@ -2009,6 +2009,7 @@
 		try:
 			clustername = request.form['clusterName']
 		except:
+			luci_log.debug('missing cluster name for NTP')
 			return None
 
 	try:
@@ -2017,20 +2018,21 @@
 		try:
 			nodename = request.form['nodename']
 		except:
+			luci_log.debug('missing nodename name for NTP')
 			return None
 
 	try:
 		task = request['task']
-		if not task:
-			raise
 	except KeyError, e:
 		try:
 			task = request.form['task']
 		except:
+			luci_log.debug('missing task for NTP')
 			return None
 
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
 	if not nodename_resolved or not nodename or not task or not clustername:
+		luci_log.debug('resolve_nodename failed for NTP')
 		return None
 
 	if task != NODE_FENCE:
@@ -2078,22 +2080,42 @@
 			return None
 
 	if task == NODE_LEAVE_CLUSTER:
-		batch_number, result = nodeLeaveCluster(rc)
+		path = str(CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved)
 
-		path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
-		nodefolder = self.restrictedTraverse(path)
+		try:
+			nodefolder = self.restrictedTraverse(path)
+			if not nodefolder:
+				raise Exception, 'cannot find directory at %s' % path
+		except Exception, e:
+			luci_log.debug('node_leave_cluster err: %s' % str(e))
+			return None
+
+		objname = str(nodename_resolved + "____flag")
+
+		fnpresent = noNodeFlagsPresent(self, nodefolder, objname, nodename_resolved)
+		if fnpresent is None:
+			luci_log.debug('An error occurred while checking flags for %s' \
+				% nodename_resolved)
+			return None
+
+		if fnpresent == False:
+			luci_log.debug('flags are still present for %s -- bailing out' \
+				% nodename_resolved)
+			return None
+
+		batch_number, result = nodeLeaveCluster(rc)
 		batch_id = str(batch_number)
-		objname = nodename_resolved + "____flag"
-		if noNodeFlagsPresent(self, nodefolder, objname, nodename_resolved) == False:
-			raise UnknownClusterError("Fatal", "An unfinished task flag exists for node %s" % nodename)
 
-		nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-		#Now we need to annotate the new DB object
-		objpath = path + "/" + objname
-		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
-		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+		objpath = str(path + "/" + objname)
+		try:
+			nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			#Now we need to annotate the new DB object
+			flag = self.restrictedTraverse(objpath)
+			flag.manage_addProperty(BATCH_ID, batch_id, "string")
+			flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
+			flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+		except:
+			luci_log.debug('An error occurred while setting flag %s' % objpath)
 
 		response = request.RESPONSE
 		#Is this correct? Should we re-direct to the cluster page?
@@ -2670,22 +2692,29 @@
       try:
         cluname = req.form['clusterName']
       except:
+        luci_log.debug_verbose('No cluster name -- returning empty map')
         return map
 
   path = CLUSTER_FOLDER_PATH + cluname
   try:
     clusterfolder = self.restrictedTraverse(str(path))
     if not clusterfolder:
-      raise
-  except:
+      raise Exception, 'clusterfolder is None'
+  except Exception, e:
+    luci_log.debug_verbose('cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
     return map
+  except:
+    luci_log.debug_verbose('cluster %s [%s] folder missing: returning empty map' % (cluname, path))
+    return map
 
   try:
     items = clusterfolder.objectItems('ManagedSystem')
     if not items or len(items) < 1:
       return map  #This returns an empty map, and should indicate not busy
+  except Exception, e:
+    luci_log.debug('An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
+    return map
   except:
-    luci_log.debug('An error occurred while looking for cluster %s flags' % cluname)
+    luci_log.debug('An error occurred while looking for cluster %s flags@path %s' % (cluname, path))
     return map
     
   map['busy'] = "true"
@@ -2716,14 +2745,30 @@
       node_report['desc'] = item[1].getProperty(FLAG_DESC) 
       batch_xml = None
       ricci = item[0].split("____") #This removes the 'flag' suffix
+
       try:
         rc = RicciCommunicator(ricci[0])
-        batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
-        if batch_xml != None:
-          (creation_status, total) = batch_status(batch_xml)
+      except RicciError, e:
+        rc = None
+        luci_log.debug_verbose('ricci returned error in iCB for %s: %s' \
+          % (cluname, str(e)))
       except:
-        creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
-        batch_xml = "bloody_failure" #set to avoid next if statement
+        rc = None
+        luci_log.info('ricci connection failed for cluster %s' % cluname)
+
+      if rc is not None:
+        try:
+          batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
+          if batch_xml != None:
+            (creation_status, total) = batch_status(batch_xml)
+          else:
+            luci_log.debug_verbose('batch report for cluster %s, item %s is None' % (cluname, item[0]))
+        except:
+          creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
+          batch_xml = "bloody_failure" #set to avoid next if statement
+      else:
+          creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
+          batch_xml = "bloody_failure" #set to avoid next if statement
 
       if batch_xml == None:  #The job is done and gone from queue
         if redirect_message == False: #We have not displayed this message yet
@@ -2732,6 +2777,8 @@
           node_report['errormessage'] = ""
           nodereports.append(node_report)
           redirect_message = True
+
+        luci_log.debug_verbose('batch job is done -- deleting %s' % item[0])
         clusterfolder.manage_delObjects(item[0])
         continue
 
@@ -3620,7 +3667,7 @@
 		items = nodefolder.objectItems('ManagedSystem')
 	except:
 		luci_log.debug('An error occurred while trying to list flags for cluster ' + nodefolder[0])
-		return False
+		return None
 
 	for item in items:
 		if item[0] != flagname:
@@ -3631,7 +3678,7 @@
 			rc = RicciCommunicator(hostname)
 		except RicciError, e:
 			luci_log.info('Unable to connect to the ricci daemon: %s' % str(e))
-			return False
+			return None
 
 		if not rc.authed():
 			try:
@@ -3649,11 +3696,12 @@
 			except Exception, e:
 				luci_log.info('manage_delObjects for %s failed: %s' \
 					% (item[0], str(e)))
-				return False
+				return None
 			return True
 		else:
 			#Not finished, so cannot remove flag
 			return False
+
 	return True
 
 def getModelBuilder(rc, isVirtualized):
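
After this change noNodeFlagsPresent is effectively tri-state: True means no stale flag exists (or one was cleaned up), False means an earlier batch is still running, and None means the check itself failed. The NODE_LEAVE_CLUSTER hunk above distinguishes the cases with an identity test; in sketch form:

    fnpresent = noNodeFlagsPresent(self, nodefolder, objname, nodename_resolved)
    if fnpresent is None:
        # the flag check itself failed
        return None
    if fnpresent == False:
        # an earlier task is still in flight; do not queue another
        return None
    # fnpresent is True: safe to create a new flag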



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-23 20:47 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-23 20:47 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	jparsons at sourceware.org	2006-10-23 20:47:10

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix for bz 211345

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.2&r2=1.120.2.3

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/20 22:09:38	1.120.2.2
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/23 20:47:10	1.120.2.3
@@ -16,6 +16,7 @@
 from NFSClient import NFSClient
 from NFSExport import NFSExport
 from Netfs import Netfs
+from Xenvm import Xenvm
 from Script import Script
 from Samba import Samba
 from clusterOS import resolveOSType



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-20 22:09 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-20 22:09 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2006-10-20 22:09:39

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix for fix for bz# 211104..

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.1&r2=1.120.2.2

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/19 14:57:17	1.120.2.1
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/20 22:09:38	1.120.2.2
@@ -1225,7 +1225,7 @@
 def getTabs(self, req):
   ###XXX Make this method return only tabs current user can use
   portaltabs = list()
-  if userAuthenticated(self):
+  if not userAuthenticated(self):
     return portaltabs
   selectedtab = "homebase"
   try:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-20 21:59 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-20 21:59 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-20 21:59:54

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	more logging for debug

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.122&r2=1.123

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/18 23:12:31	1.122
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/20 21:59:54	1.123
@@ -2594,13 +2594,21 @@
 	except:
 		return "Unable to resolve node name %s to retrieve logging information" % nodename_resolved
 
+	if not rc.authed():
+		try:
+			snode = getStorageNode(self, nodename)
+			setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+		except:
+			pass
+		return "Luci is not authenticated to node %s. Please reauthenticate first." % nodename
+
 	return getNodeLogs(rc)
 
 def processXenVM(self, req):
   model = req.SESSION.get('model')
   isNew = False
   try:
-    xenvmname = req	['servicename']
+    xenvmname = req['servicename']
   except KeyError, e:
     isNew = True
   
@@ -2623,14 +2631,27 @@
     
 
 def getXenVMInfo(self, model, request):
-  try:
-    xenvmname = request['servicename']
-  except KeyError, e:
-    return {}
-  
-  xenvm = model.retrieveXenVMsByName(xenvmname)
-  map = xenvm.getAttributes()
-  return map
+	try:
+		xenvmname = request['servicename']
+	except KeyError, e:
+		try:
+			xenvmname = request.form['servicename']
+		except:
+			luci_log.debug_verbose('servicename is missing from request')
+			return {}
+	except:
+		luci_log.debug_verbose('servicename is missing from request')
+		return {}
+
+	try:  
+		xenvm = model.retrieveXenVMsByName(xenvmname)
+	except:
+		luci_log.debug('An error occurred while attempting to get VM %s' \
+			% xenvmname)
+		return {}
+
+	map = xenvm.getAttributes()
+	return map
 
 def isClusterBusy(self, req):
   items = None
@@ -2661,9 +2682,10 @@
 
   try:
     items = clusterfolder.objectItems('ManagedSystem')
-    if len(items) == 0:
+    if not items or len(items) < 1:
       return map  #This returns an empty map, and should indicate not busy
   except:
+    luci_log.debug('An error occurred while looking for cluster %s flags' % cluname)
     return map
     
   map['busy'] = "true"
@@ -2760,7 +2782,10 @@
           node_report['statusmessage'] = "Node created successfully" + REDIRECT_MSG
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
-          clusterfolder.manage_delObjects(item[0])
+          try:
+              clusterfolder.manage_delObjects(item[0])
+          except Exception, e:
+              luci_log.info('Unable to delete %s: %s' % (item[0], str(e)))
           continue
         else:
           map['busy'] = "true"
@@ -2783,13 +2808,17 @@
       if finished == True:
         node_report['desc'] = item[1].getProperty(FLAG_DESC) + REDIRECT_MSG
         nodereports.append(node_report)
-        clusterfolder.manage_delObjects(item[0])
+        try:
+            clusterfolder.manage_delObjects(item[0])
+        except Exception, e:
+            luci_log.info('Unable to delete %s: %s' % (item[0], str(e)))
       else:
         node_report = {}
         map['busy'] = "true"
         isBusy = True
         node_report['desc'] = item[1].getProperty(FLAG_DESC)
         nodereports.append(node_report)
+
   if isBusy:
     part1 = req['ACTUAL_URL']
     part2 = req['QUERY_STRING']
@@ -2809,12 +2838,14 @@
 
 def getClusterOS(self, rc):
 	map = {}
+
 	try:
 		os_str = resolveOSType(rc.os())
 		map['os'] = os_str
 		map['isVirtualized'] = rc.dom0()
 	except:
 		# default to rhel5 if something crazy happened.
+		luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
 		map['os'] = 'rhel5'
 		map['isVirtualized'] = False
 	return map
@@ -2829,8 +2860,10 @@
 		try:
 			cluname = request.form['clustername']
 		except:
+			luci_log.debug_verbose('getResourcesInfo missing cluster name')
 			return resList
 	except:
+		luci_log.debug_verbose('getResourcesInfo missing cluster name')
 		return resList
 
 	for item in modelb.getResources():
@@ -2850,8 +2883,10 @@
 		try:
 			name = request.form['resourcename']
 		except:
+			luci_log.debug_verbose('getResourceInfo missing res name')
 			return {}
 	except:
+		luci_log.debug_verbose('getResourceInfo missing res name')
 		return {}
 
 	try:
@@ -2860,19 +2895,22 @@
 		try:
 			cluname = request.form['clustername']
 		except:
+			luci_log.debug_verbose('getResourceInfo missing cluster name')
 			return {}
 	except:
+		luci_log.debug_verbose('getResourceInfo missing cluster name')
 		return {}
 
 	try:
 		baseurl = request['URL']
 	except:
+		luci_log.debug_verbose('getResourceInfo missing URL')
 		return {}
 
 	for res in modelb.getResources():
 		if res.getName() == name:
-			resMap = {}
 			try:
+				resMap = {}
 				resMap['name'] = res.getName()
 				resMap['type'] = res.resource_type
 				resMap['tag_name'] = res.TAG_NAME
@@ -2880,7 +2918,7 @@
 				resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
 				return resMap
 			except:
-				return {}
+				continue
 
 def delResource(self, rc, request):
 	errstr = 'An error occurred while attempting to set the cluster.conf'
@@ -2888,11 +2926,19 @@
 	try:
 		modelb = request.SESSION.get('model')
 	except:
+		luci_log.debug_verbose('delResource unable to extract model from SESSION')
 		return errstr
 
 	try:
 		name = request['resourcename']
 	except KeyError, e:
+		try:
+			name = request.form['resourcename']
+		except:
+			luci_log.debug_verbose('delResource missing resname %s' % str(e))
+			return errstr + ': ' + str(e)
+	except:
+		luci_log.debug_verbose('delResource missing resname')
 		return errstr + ': ' + str(e)
 
 	try:
@@ -2901,6 +2947,7 @@
 		try:
 			clustername = request.form['clustername']
 		except:
+			luci_log.debug_verbose('delResource missing cluster name')
 			return errstr + ': could not determine the cluster name.'
 
 	try:
@@ -2921,6 +2968,7 @@
 			break
 
 	if not found:
+		luci_log.debug_verbose('delresource can\'t find res %s' % name)
 		return errstr + ': the specified resource was not found.'
 
 	try:
@@ -2928,10 +2976,12 @@
 		if not conf:
 			raise
 	except:
+		luci_log.debug_verbose('exportModelAsString failed')
 		return errstr
 
 	batch_number, result = setClusterConf(str(conf))
 	if batch_number is None or result is None:
+		luci_log.debug_verbose('missing batch and/or result from setClusterConf')
 		return errstr
 
 	modelstr = ""
@@ -2939,13 +2989,20 @@
 	clusterfolder = self.restrictedTraverse(path)
 	batch_id = str(batch_number)
 	objname = str(ragent) + '____flag'
-	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-	#Now we need to annotate the new DB object
 	objpath = str(path + '/' + objname)
-	flag = self.restrictedTraverse(objpath)
-	flag.manage_addProperty(BATCH_ID, batch_id, "string")
-	flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
-	flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
+
+	try:
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
+		flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
+	except Exception, e:
+		luci_log.debug('An error occurred while setting flag %s: %s' \
+			% (objname, str(e)))
+	except:
+		luci_log.debug('An error occurred while setting flag %s' % objname)
 
 	response = request.RESPONSE
 	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
@@ -2953,8 +3010,8 @@
 def addIp(request, form=None):
 	if form is None:
 		form = request.form
-	modelb = request.SESSION.get('model')
 
+	modelb = request.SESSION.get('model')
 	if not modelb or not form:
 		return None
 
@@ -3069,6 +3126,7 @@
 def addGfs(request, form=None):
 	if form is None:
 		form = request.form
+
 	modelb = request.SESSION.get('model')
 	if not modelb:
 		return None
@@ -3079,13 +3137,21 @@
 			if not oldname:
 				raise KeyError('oldname is blank.')
 			res = getResourceForEdit(modelb, oldname)
+			if not res:
+				luci_log.debug('resource %s was not found for editing' % oldname)
+				return None
 		except KeyError, e:
+			luci_log.debug('resource %s was not found for editing: %s' \
+				% (oldname, str(e)))
 			return None
 	else:
-		res = apply(Clusterfs)
-
-	if not res:
-		return None
+		try:
+			res = apply(Clusterfs)
+			if not res:
+				raise
+		except:
+			luci_log.debug('Error creating new Clusterfs resource')
+			return None
 
 	# XXX: sanity check these fields
 	try:
@@ -3094,30 +3160,35 @@
 			raise
 		res.attr_hash['name'] = name
 	except:
+		luci_log.debug_verbose('name is missing in clusterfs res')
 		return None
 
 	try:
 		mountpoint = form['mountpoint'].strip()
 		res.attr_hash['mountpoint'] = mountpoint
 	except:
+		luci_log.debug_verbose('mountpoint is missing in clusterfs res')
 		return None
 
 	try:
 		device = form['device'].strip()
 		res.attr_hash['device'] = device
 	except:
+		luci_log.debug_verbose('device is missing in clusterfs res')
 		return None
 
 	try:
 		options = form['options'].strip()
 		res.attr_hash['options'] = options
 	except:
+		luci_log.debug_verbose('options is missing in clusterfs res')
 		return None
 
 	try:
 		fsid = form['fsid'].strip()
 		res.attr_hash['fsid'] = fsid
 	except:
+		luci_log.debug_verbose('fsid is missing in clusterfs res')
 		return None
 
 	if form.has_key('forceunmount'):
@@ -3373,16 +3444,20 @@
 	try:
 		mb_nodes = modelb.getNodes()
 		if not mb_nodes or not len(mb_nodes):
-			raise
-	except:
-		return 'Unable to find cluster nodes for ' + clusterName
+			raise Exception, 'node list is empty'
+	except Exception, e:
+		luci_log.debug_verbose('no model builder nodes found for %s: %s' \
+				% (clusterName, str(e)))
+		return 'Unable to find cluster nodes for %s' % clusterName
 
 	try:
 		cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
 		if not cluster_node:
-			raise
-	except:
-		return 'Unable to find an entry for ' + clusterName + ' in the Luci database.'
+			raise Exception, 'cluster node is none'
+	except Exception, e:
+		luci_log.debug('can\'t find cluster node for %s: %s'
+			% (clusterName, str(e)))
+		return 'Unable to find an entry for %s in the Luci database.' % clusterName
 
 	try:
 		db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
@@ -3469,9 +3544,11 @@
 	try:
 		ragent = rc.hostname()
 		if not ragent:
+			luci_log.debug('missing hostname')
 			raise
 		batch_number, result = setClusterConf(str(conf))
 		if batch_number is None or result is None:
+			luci_log.debug('missing batch_number or result')
 			raise
 	except:
 		return "Some error occured in setClusterConf\n"
@@ -3480,17 +3557,24 @@
 	clusterfolder = self.restrictedTraverse(path)
 	batch_id = str(batch_number)
 	objname = str(ragent + '____flag')
-	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-	#Now we need to annotate the new DB object
 	objpath = str(path + '/' + objname)
-	flag = self.restrictedTraverse(objpath)
-	flag.manage_addProperty(BATCH_ID, batch_id, "string")
-	flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
 
-	if type != 'ip':
-		flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
-	else:
-		flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+	try:
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		#Now we need to annotate the new DB object
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
+
+		if type != 'ip':
+			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
+		else:
+			flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+	except Exception, e:
+		try:
+			luci_log.info('Unable to create flag %s: %s' % (objpath, str(e)))
+		except:
+			pass
 
 	response = request.RESPONSE
 	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
@@ -3503,24 +3587,33 @@
 		if res.getName() == name:
 			resPtr.removeChild(res)
 			return res
+
+	luci_log.debug_verbose('unable to find resource \"%s\"' % name)
 	raise KeyError, name
 
 def appendModel(request, model):
 	try:
 		request.SESSION.set('model', model)
 	except:
-		pass
-
-	return False
+		luci_log.debug_verbose('Appending model to request failed')
+		return False
 
 def resolve_nodename(self, clustername, nodename):
-	path = CLUSTER_FOLDER_PATH + clustername
-	clusterfolder = self.restrictedTraverse(path)
-	objs = clusterfolder.objectItems('Folder')
+	path = str(CLUSTER_FOLDER_PATH + clustername)
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		objs = clusterfolder.objectItems('Folder')
+	except Exception, e:
+		luci_log.info('resolve_nodename failed for %s/%s: %s' \
+			% (nodename, clustername, str(e)))
+		return None
+
 	for obj in objs:
 		if obj[0].find(nodename) != (-1):
 			return obj[0]
-	raise
+
+	luci_log.info('resolve_nodename failed for %s/%s' % (nodename, clustername))
+	return None
 
 def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
 	try:
@@ -3536,15 +3629,26 @@
 		#a flag already exists... try to delete it
 		try:
 			rc = RicciCommunicator(hostname)
-		except:
-			luci_log.info('Unable to connect to the ricci daemon on host ' + hostname)
+		except RicciError, e:
+			luci_log.info('Unable to connect to the ricci daemon: %s' % str(e))
 			return False
 
+		if not rc.authed():
+			try:
+				snode = getStorageNode(self, hostname)
+				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+			except:
+				pass
+			luci_log.info('Node %s is not authenticated' % item[0])
+			return None
+
 		finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
 		if finished == True:
 			try:
 				nodefolder.manage_delObjects(item[0])
-			except:
+			except Exception, e:
+				luci_log.info('manage_delObjects for %s failed: %s' \
+					% (item[0], str(e)))
 				return False
 			return True
 		else:
@@ -3552,8 +3656,22 @@
 			return False
 	return True
 
-def getModelBuilder(rc,isVirtualized):
-	cluster_conf_node = getClusterConf(rc)
-	modelb = ModelBuilder(0, None, None, cluster_conf_node)
+def getModelBuilder(rc, isVirtualized):
+	try:
+		cluster_conf_node = getClusterConf(rc)
+		if not cluster_conf_node:
+			raise
+	except:
+		luci_log.debug('unable to get cluster_conf_node in getModelBuilder')
+		return None
+
+	try:
+		modelb = ModelBuilder(0, None, None, cluster_conf_node)
+	except Exception, e:
+		try:
+			luci_log.debug('An error occurred while trying to get modelb for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
+		except:
+			pass
+		return None
+
 	modelb.setIsVirtualized(isVirtualized)
 	return modelb
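
With these guards in place getModelBuilder can return None, both when cluster.conf cannot be fetched and when ModelBuilder construction fails, so callers need a None check before touching the model. A usage sketch:

    modelb = getModelBuilder(rc, isVirtualized)
    if modelb is None:
        luci_log.debug('unable to build a model via %s' % rc.hostname())
        return None
    nodes = modelb.getNodes()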



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-19 14:57 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-19 14:57 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2006-10-19 14:57:17

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           storage_adapters.py 

Log message:
	fix for bz# 211104

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120&r2=1.120.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/storage_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.7&r2=1.7.2.1

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 21:01:25	1.120
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/19 14:57:17	1.120.2.1
@@ -21,7 +21,7 @@
 from clusterOS import resolveOSType
 from GeneralError import GeneralError
 from UnknownClusterError import UnknownClusterError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag
+from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -1225,6 +1225,8 @@
 def getTabs(self, req):
   ###XXX Make this method return only tabs current user can use
   portaltabs = list()
+  if userAuthenticated(self):
+    return portaltabs
   selectedtab = "homebase"
   try:
     baseurl = req['URL']
--- conga/luci/site/luci/Extensions/storage_adapters.py	2006/10/16 04:51:32	1.7
+++ conga/luci/site/luci/Extensions/storage_adapters.py	2006/10/19 14:57:17	1.7.2.1
@@ -370,50 +370,3 @@
   url += '&' + STONAME + '=' + hostname
   return url
 
-
-def getTabs(self, req):
-  ###XXX Make this method return only tabs current user can use
-  portaltabs = list()
-  selectedtab = "homebase"
-  try:
-    baseurl = req['URL']
-    if baseurl.find("cluster") > (-1):
-      selectedtab = "cluster"
-    elif baseurl.find("storage") > (-1):
-      selectedtab = "storage"
-    else:
-      selectedtab = "homebase"
-  except KeyError, e:
-    pass
-
-  htab = { 'Title':"homebase",
-           'Description':"Home base for this luci server", 
-           'Taburl':"../homebase"}
-  if selectedtab == "homebase":
-    htab['isSelected'] = True
-  else:
-    htab['isSelected'] = False
-      
-
-  ctab = { 'Title':"cluster",
-           'Description':"Cluster configuration page", 
-           'Taburl':"../cluster?pagetype=3"}
-  if selectedtab == "cluster":
-    ctab['isSelected'] = True
-  else:
-    ctab['isSelected'] = False
-
-  stab = { 'Title':"storage",
-           'Description':"Storage configuration page", 
-           'Taburl':"../storage"}
-  if selectedtab == "storage":
-    stab['isSelected'] = True
-  else:
-    stab['isSelected'] = False
-
-  portaltabs.append(htab) 
-  portaltabs.append(ctab) 
-  portaltabs.append(stab) 
-
-  return portaltabs
-
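
The removed copy built three structurally identical tab dicts by hand. Since the only variation per tab is the title, description and URL, the same list can be generated from a table; a sketch of that refactoring (not the shipped code):

    TABS = (
        ('homebase', 'Home base for this luci server', '../homebase'),
        ('cluster', 'Cluster configuration page', '../cluster?pagetype=3'),
        ('storage', 'Storage configuration page', '../storage'),
    )

    def build_tabs(selectedtab):
        portaltabs = []
        for title, desc, url in TABS:
            portaltabs.append({
                'Title': title,
                'Description': desc,
                'Taburl': url,
                'isSelected': title == selectedtab,
            })
        return portaltabs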



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-18 23:12 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-18 23:12 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-18 23:12:31

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 

Log message:
	better error handling
	log important (or those useful for debugging) errors to syslog

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.121&r2=1.122
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.34&r2=1.35

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/18 19:16:17	1.121
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/18 23:12:31	1.122
@@ -22,7 +22,8 @@
 from clusterOS import resolveOSType
 from GeneralError import GeneralError
 from UnknownClusterError import UnknownClusterError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated
+from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
+from LuciSyslog import LuciSyslogError, LuciSyslog
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -34,6 +35,11 @@
 
 CLUSTER_FOLDER_PATH = '/luci/systems/cluster/'
 
+try:
+	luci_log = LuciSyslog()
+except LuciSyslogError, e:
+	pass
+
 def validateClusterNodes(request, sessionData, clusterName, numStorage):
 	nodeList = list()
 	nodeHash = {}
@@ -205,11 +211,24 @@
 		batch_id_map = {}
 		rc = None
 		for i in nodeList:
+			success = True
 			try:
 				rc = RicciCommunicator(i['ricci_host'])
-				resultNode = rc.process_batch(batchNode, async=True)
-				batch_id_map[i['ricci_host']] = resultNode.getAttribute('batch_id')
+			except RicciError, e:
+				luci_log.debug('Unable to connect to the ricci agent on %s: %s'\
+					% (i['ricci_host'], str(e)))
+				success = False
 			except:
+				success = False
+
+			if success == True:
+				try:
+					resultNode = rc.process_batch(batchNode, async=True)
+					batch_id_map[i['ricci_host']] = resultNode.getAttribute('batch_id')
+				except:
+					success = False
+
+			if not success:
 				nodeUnauth(nodeList)
 				cluster_properties['isComplete'] = False
 				errors.append('An error occurred while attempting to add cluster node \"' + i['ricci_host'] + '\"')
@@ -294,6 +313,7 @@
 		clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
 		cluster_os = clusterObj.manage_getProperty('cluster_os')
 		if not cluster_os:
+			luci_log.debug('The cluster OS property is missing for cluster ' + clusterName)
 			raise Exception, 'no cluster OS was found.'
 		try:
 			if len(filter(lambda x: x['os'] != cluster_os, nodeList)) > 0:
@@ -342,17 +362,28 @@
 	batch_id_map = {}
 	for i in nodeList:
 		clunode = nodeList[i]
+		success = True
 		try:
 			rc = RicciCommunicator(clunode['ricci_host'])
-			resultNode = rc.process_batch(batchNode, async=True)
-			batch_id_map[clunode['ricci_host']] = resultNode.getAttribute('batch_id')
-			messages.append('Cluster join initiated for host \"' + clunode['ricci_host'] + '\"')
 		except:
+			luci_log.info('Unable to connect to the ricci daemon on host ' + clunode['ricci_host'])
+			success = False
+
+		if success:
+			try:
+				resultNode = rc.process_batch(batchNode, async=True)
+				batch_id_map[clunode['ricci_host']] = resultNode.getAttribute('batch_id')
+			except:
+				success = False
+
+		if not success:
 			nodeUnauth(nodeList)
 			cluster_properties['isComplete'] = False
 			errors.append('An error occurred while attempting to add cluster node \"' + clunode['ricci_host'] + '\"')
 			return (False, {'errors': errors, 'requestResults': cluster_properties})
 
+		messages.append('Cluster join initiated for host \"' + clunode['ricci_host'] + '\"')
+
 	buildClusterCreateFlags(self, batch_id_map, clusterName)
 	return (True, {'errors': errors, 'messages': messages})
 
@@ -412,6 +443,7 @@
 		try:
 			resObj = resourceAddHandler[res_type](self, dummy_form)
 		except:
+			luci_log
 			resObj = None
 
 		if resObj is None:
@@ -1304,9 +1336,12 @@
 	try:
 		clusterfolder = self.restrictedTraverse(path)
 		if not clusterfolder:
+			luci_log.debug('cluster folder %s for %s is missing.' \
+				% (path, clustername))
 			raise
 		nodes = clusterfolder.objectItems('Folder')
 		if len(nodes) < 1:
+			luci_log.debug('no cluster nodes for %s found.' % clustername)
 			return None
 	except:
 		return None
@@ -1324,15 +1359,15 @@
 
 		try:
 			rc = RicciCommunicator(hostname)
-			if not rc:
-				raise
-		except:
-			#raise Exception, ('unable to communicate with the ricci agent on %s', hostname)
+		except RicciError, e:
+			luci_log.debug('ricci error: %s' % str(e))
 			continue
 
 		try:
 			clu_info = rc.cluster_info()
 			if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
+				luci_log.debug('%s reports it\'s in cluster %s:%s; we expect %s' \
+					 % (hostname, clu_info[0], clu_info[1], cluname))
 				# node reports it's in a different cluster
 				raise
 		except:
@@ -1340,7 +1375,9 @@
 
 		if rc.authed():
 			return rc
-		setNodeFlag(self, node[1], CLUSTER_NODE_NEED_AUTH)
+		setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+
+	luci_log.debug('no ricci agent could be found for cluster %s' % cluname)
 	return None
 
 def getRicciAgentForCluster(self, req):
@@ -1352,11 +1389,13 @@
 			if not clustername:
 				raise
 		except:
+			luci_log.debug('no cluster name was specified in getRicciAgentForCluster')
 			return None
 	return getRicciAgent(self, clustername)
 
 def getClusterStatus(self, rc):
 	clustatus_batch ='<?xml version="1.0" ?><batch><module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module></batch>'
+
 	try:
 		clustatuscmd_xml = minidom.parseString(clustatus_batch).firstChild
 	except:
@@ -1364,6 +1403,8 @@
 
 	try:
 		ricci_xml = rc.process_batch(clustatuscmd_xml, async=False)
+	except RicciError, e:
+		luci_log.debug('ricci error: %s' % str(e))
 	except:
 		return {}
 
@@ -1998,16 +2039,44 @@
 		# to be performed.
 		try:
 			rc = RicciCommunicator(nodename_resolved)
-			# XXX - check the cluster
-			if not rc.authed():
-				# set the flag
-				rc = None
-
-			if not rc:
-				raise
+		except RicciError, e:
+			luci_log.debug('ricci error from %s: %s' \
+				% (nodename_resolved, str(e)))
+			return None
 		except:
 			return None
 
+		cluinfo = rc.cluster_info()
+		if not cluinfo[0] and not cluinfo[1]:
+			luci_log.debug('host %s not in a cluster (expected %s)' \
+				% (nodename_resolved, clustername))
+			return None
+
+		cname = lower(clustername)
+		if cname != lower(cluinfo[0]) and cname != lower(cluinfo[1]):
+			luci_log.debug('host %s in unknown cluster %s:%s (expected %s)' \
+				% (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
+			return None
+
+		if not rc.authed():
+			rc = None
+			try:
+				snode = getStorageNode(self, nodename)
+				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+			except:
+				# we'll hit it again, and try again then
+				pass
+
+			try:
+				cnode = getClusterNode(self, nodename, clustername)
+				setNodeFlag(cnode, CLUSTER_NODE_NEED_AUTH)
+			except:
+				# we'll hit it again, and try again then
+				pass
+
+		if rc is None:
+			return None
+
 	if task == NODE_LEAVE_CLUSTER:
 		batch_number, result = nodeLeaveCluster(rc)
 
@@ -2056,40 +2125,64 @@
 		#Now we need to annotate the new DB object
 		objpath = path + "/" + objname
 		flag = self.restrictedTraverse(objpath)
-		flag.manage_addProperty(BATCH_ID,batch_id, "string")
-		flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
-		flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
+		flag.manage_addProperty(BATCH_ID, batch_id, "string")
+		flag.manage_addProperty(TASKTYPE, NODE_REBOOT, "string")
+		flag.manage_addProperty(FLAG_DESC, "Node \'" + nodename + "\' is being rebooted", "string")
 
 		response = request.RESPONSE
 		#Once again, is this correct? Should we re-direct to the cluster page?
 		response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 	elif task == NODE_FENCE:
 		#here, we DON'T want to open connection to node to be fenced.
-		path = CLUSTER_FOLDER_PATH + clustername
+		path = str(CLUSTER_FOLDER_PATH + clustername)
 		try:
 			clusterfolder = self.restrictedTraverse(path)
 			if not clusterfolder:
 				raise
 		except:
+			luci_log.debug('The cluster folder for %s could not be found.' \
+				 % clustername)
+			return None
+
+		try:
+			nodes = clusterfolder.objectItems('Folder')
+		except:
+			luci_log.debug('No cluster nodes for %s were found' % clustername)
 			return None
 
-		nodes = clusterfolder.objectItems('Folder')
 		found_one = False
 		for node in nodes:
-			if node[1].getID().find(nodename) != (-1):
+			if node[1].getId().find(nodename) != (-1):
 				continue
 
 			try:
 				rc = RicciCommunicator(node[1].getId())
-				if not rc.authed():
-					# set the node flag
-					rc = None
 				if not rc:
-					raise
-				found_one = True
-				break
+					continue
+			except RicciError, e:
+				luci_log.debug('ricci error for host %s: %s' \
+					% (node[0], str(e)))
+				continue
 			except:
 				continue
+
+			if not rc.authed():
+				rc = None
+				try:
+					snode = getStorageNode(self, node[1].getId())
+					setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				try:
+					setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				continue
+			found_one = True
+			break
+
 		if not found_one:
 			return None
 
@@ -3430,14 +3523,23 @@
 	raise
 
 def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
-	items = nodefolder.objectItems('ManagedSystem')
+	try:
+		items = nodefolder.objectItems('ManagedSystem')
+	except:
+		luci_log.debug('An error occurred while trying to list flags for cluster ' + nodefolder[0])
+		return False
 
 	for item in items:
 		if item[0] != flagname:
 			continue
 
 		#a flag already exists... try to delete it
-		rc = RicciCommunicator(hostname)
+		try:
+			rc = RicciCommunicator(hostname)
+		except:
+			luci_log.info('Unable to connect to the ricci daemon on host ' + hostname)
+			return False
+
 		finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
 		if finished == True:
 			try:
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/16 20:46:46	1.34
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/18 23:12:31	1.35
@@ -1367,7 +1367,7 @@
 		pass
 	return False
 
-def setNodeFlag(self, node, flag_mask):
+def setNodeFlag(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
 		node.manage_changeProperties({ 'flags': flags | flag_mask })
@@ -1377,7 +1377,7 @@
 		except:
 			pass
 
-def delNodeFlag(self, node, flag_mask):
+def delNodeFlag(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
 		if flags & flag_mask != 0:
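For readers following the change above: dropping the unused self argument makes setNodeFlag and delNodeFlag plain functions over a node object's integer 'flags' property. A minimal sketch of the bitmask arithmetic they perform; FakeNode and the flag value are stand-ins for the real Zope objects and conga_constants.py constant, and the clearing step in del_node_flag is assumed since the archived hunk is truncated there:

    CLUSTER_NODE_NEED_AUTH = 0x1   # assumed value, for illustration only

    class FakeNode:
        """Stand-in for the Zope folder object the real helpers receive."""
        def __init__(self):
            self.props = {'flags': 0}
        def getProperty(self, name):
            return self.props[name]
        def manage_changeProperties(self, mapping):
            self.props.update(mapping)

    def set_node_flag(node, flag_mask):
        # OR the mask into the existing flags, as setNodeFlag does
        node.manage_changeProperties(
            {'flags': node.getProperty('flags') | flag_mask})

    def del_node_flag(node, flag_mask):
        # clear the mask only if it is currently set (clearing assumed)
        flags = node.getProperty('flags')
        if flags & flag_mask != 0:
            node.manage_changeProperties({'flags': flags & ~flag_mask})

    node = FakeNode()
    set_node_flag(node, CLUSTER_NODE_NEED_AUTH)
    assert node.getProperty('flags') & CLUSTER_NODE_NEED_AUTH
    del_node_flag(node, CLUSTER_NODE_NEED_AUTH)
    assert node.getProperty('flags') == 0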



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-18 19:16 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-18 19:16 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-18 19:16:17

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fixes for bz#211345 and bz#211104. will backport to the RHEL5 branch when/if approved.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.120&r2=1.121

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 21:01:25	1.120
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/18 19:16:17	1.121
@@ -16,12 +16,13 @@
 from NFSClient import NFSClient
 from NFSExport import NFSExport
 from Netfs import Netfs
+from Xenvm import Xenvm
 from Script import Script
 from Samba import Samba
 from clusterOS import resolveOSType
 from GeneralError import GeneralError
 from UnknownClusterError import UnknownClusterError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag
+from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -1225,6 +1226,8 @@
 def getTabs(self, req):
   ###XXX Make this method return only tabs current user can use
   portaltabs = list()
+  if userAuthenticated(self):
+    return portaltabs
   selectedtab = "homebase"
   try:
     baseurl = req['URL']



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-16 21:01 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-16 21:01 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-16 21:01:26

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	whitespace fix

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.119&r2=1.120

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 20:51:46	1.119
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 21:01:25	1.120
@@ -3415,7 +3415,7 @@
 	except:
 		pass
 
-  return False
+	return False
 
 def resolve_nodename(self, clustername, nodename):
 	path = CLUSTER_FOLDER_PATH + clustername



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-16 20:51 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-16 20:51 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-16 20:51:46

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Last loose ends before build

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.118&r2=1.119

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 20:24:28	1.118
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 20:51:46	1.119
@@ -3415,6 +3415,8 @@
 	except:
 		pass
 
+  return False
+
 def resolve_nodename(self, clustername, nodename):
 	path = CLUSTER_FOLDER_PATH + clustername
 	clusterfolder = self.restrictedTraverse(path)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-16 19:17 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-16 19:17 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-16 19:17:13

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 

Log message:
	fenceinfo method

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.115&r2=1.116
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.17&r2=1.18

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 15:18:32	1.115
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 19:17:13	1.116
@@ -907,6 +907,17 @@
   else:
     svadd['currentItem'] = False
 
+  if model.getIsVirtualized() == True:
+    vmadd = {}
+    vmadd['Title'] = "Add a XenVM"
+    vmadd['cfg_type'] = "xenvmadd"
+    vmadd['absolute_url'] = url + "?pagetype=" + XENVM_ADD + "&clustername=" + cluname
+    vmadd['Description'] = "Add a XenVM to this cluster"
+    if pagetype == XENVM_ADD:
+      vmadd['currentItem'] = True
+    else:
+      vmadd['currentItem'] = False
+
   svcfg = {}
   svcfg['Title'] = "Configure a Service"
   svcfg['cfg_type'] = "servicecfg"
@@ -922,6 +933,7 @@
     svcfg['currentItem'] = False
 
   services = model.getServices()
+  xenvms = model.getXENVMs()
   serviceable = list()
   for service in services:
     servicename = service.getName()
@@ -943,12 +955,36 @@
       svc['currentItem'] = False
 
     serviceable.append(svc)
+
+  for xenvm in xenvms:
+    xenname = xenvm.getName()
+    svc = {}
+    svc['Title'] = xenname
+    svc['cfg_type'] = "xenvm"
+    svc['absolute_url'] = url + "?pagetype=" + XENVM_CONFIG + "&servicename=" + xenname + "&clustername=" + cluname
+    svc['Description'] = "Configure this XenVM"
+    if pagetype == XENVM_CONFIG:
+      try:
+        xname = request['servicename']
+      except KeyError, e:
+        xname = ""
+      if xenname == xname:
+        svc['currentItem'] = True
+      else:
+        svc['currentItem'] = False
+    else:
+      svc['currentItem'] = False
+
+    serviceable.append(svc)
+
   svcfg['children'] = serviceable
 
 
 
   kids = list()
   kids.append(svadd)
+  if model.getIsVirtualized() == True:
+    kids.append(vmadd)
   kids.append(svcfg)
   sv['children'] = kids
 #############################################################
@@ -2322,6 +2358,17 @@
 
   return resultlist
 
+def getFence(self, model, request):
+  map = {}
+  fencename = request['fencedevicename']
+  fencedevs = model.getFenceDevices()
+  for fencedev in fencedevs:
+    if fencedev.getName().strip() == fencename:
+      map = fencedev.getAttributes()
+      return map
+
+  return map
+  
 def getFenceInfo(self, model, request):
   map = {}
   fencedevs = list() 
@@ -3362,6 +3409,8 @@
 			return False
 	return True
 
-def getModelBuilder(rc):
+def getModelBuilder(rc,isVirtualized):
 	cluster_conf_node = getClusterConf(rc)
-	return ModelBuilder(0, None, None, cluster_conf_node)
+	modelb = ModelBuilder(0, None, None, cluster_conf_node)
+  modelb.setIsVirtualized(isVirtualized)
+  return modelb
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/10/16 04:26:19	1.17
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/10/16 19:17:13	1.18
@@ -13,6 +13,8 @@
 NODE_ADD="15"
 NODE_PROCESS="16"
 NODE_LOGS="17"
+XENVM_ADD="18"
+XENVM_CONFIG="19"
 SERVICES="20"
 SERVICE_ADD="21"
 SERVICE_LIST="22"
@@ -22,6 +24,7 @@
 SERVICE_START="26"
 SERVICE_STOP="27"
 SERVICE_RESTART="28"
+XENVM_PROCESS="29"
 RESOURCES="30"
 RESOURCE_ADD="31"
 RESOURCE_LIST="32"
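The new getFence helper above is a straight linear scan over the model's fence devices. A self-contained sketch of the same lookup; FenceDev is a stand-in for the real ModelBuilder objects, and the device names and attributes are illustrative:

    class FenceDev:
        """Stand-in for a ModelBuilder fence-device object."""
        def __init__(self, name, attrs):
            self._name = name
            self._attrs = attrs
        def getName(self):
            return self._name
        def getAttributes(self):
            return self._attrs

    def get_fence_attrs(fencedevs, fencename):
        # linear scan, as getFence does; empty map when nothing matches
        for fd in fencedevs:
            if fd.getName().strip() == fencename:
                return fd.getAttributes()
        return {}

    devs = [FenceDev('apc1', {'agent': 'fence_apc', 'ipaddr': '10.0.0.5'})]
    assert get_fence_attrs(devs, 'apc1')['agent'] == 'fence_apc'
    assert get_fence_attrs(devs, 'missing') == {}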



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-16  5:28 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-16  5:28 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-16 05:27:59

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	more service stuff I had on my local machine

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.113&r2=1.114

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 04:54:33	1.113
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 05:27:59	1.114
@@ -355,7 +355,7 @@
 	buildClusterCreateFlags(self, batch_id_map, clusterName)
 	return (True, {'errors': errors, 'messages': messages})
 
-def validateServiceEdit(self, request):
+def validateServiceAdd(self, request):
 	try:
 		form_xml = request['form_xml']
 		if not form_xml:
@@ -388,11 +388,36 @@
 		if not form_parent in form_hash:
 			form_hash[form_parent] = {'form': None, 'kids': []}
 		form_hash[form_parent]['kids'].append(form_id)
-		
-	return (True, {'messages': ['OK']})
+		dummy_form = {}
+		for i in ielems:
+			try:
+				type = str(i.getAttribute('type'))
+			except:
+				continue
+			if not type or type == 'button':
+				continue
+			try:
+				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
+			except:
+				pass
 
-def validateServiceAdd(self, request):
-	return (True, {})
+		try:
+			res_type = dummy_form['type'].strip()
+			if not res_type or not res_type in resourceAddHandler:
+				raise
+		except:
+			return (False, {'errors': ['An invalid resource type was specified: ' + res_type]})
+
+		try:
+			resObj = resourceAddHandler[res_type](self, dummy_form)
+		except:
+			resObj = None
+
+		if resObj is None:
+			return (False, {'errors': ['An error occurred while adding ' + res_type]})
+		form_hash[form_id]['obj'] = resObj
+			
+	return (True, {'messages': ['This service has been updated.']})
 
 def validateResourceAdd(self, request):
 	return (True, {})
@@ -400,7 +425,6 @@
 def validateResourceEdit(self, request):
 	return (True, {})
 
-
 ## Cluster properties form validation routines
 
 def validateMCastConfig(self, form):
@@ -646,7 +670,7 @@
 	7: validateConfigCluster,
 	15: validateAddClusterNode,
 	21: validateServiceAdd,
-	24: validateServiceEdit,
+	24: validateServiceAdd,
 	31: validateResourceAdd,
 	33: validateResourceEdit,
 	51: validateFenceAdd,
@@ -2747,9 +2771,10 @@
 	response = request.RESPONSE
 	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
-def addIp(request):
+def addIp(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
-	form = request.form
 
 	if not modelb or not form:
 		return None
@@ -2785,9 +2810,10 @@
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
-def addFs(request):
+def addFs(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
-	form = request.form
 
 	if not modelb or not form:
 		return None
@@ -2861,9 +2887,9 @@
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
-def addGfs(request):
-	form = request.form
-
+def addGfs(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
 	if not modelb:
 		return None
@@ -2923,8 +2949,9 @@
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
-def addNfsm(request):
-	form = request.form
+def addNfsm(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
 
 	if not form or not modelb:
@@ -2993,8 +3020,9 @@
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
-def addNfsc(request):
-	form = request.form
+def addNfsc(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
 
 	if not form or not modelb:
@@ -3037,9 +3065,10 @@
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
-def addNfsx(request):
+def addNfsx(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
-	form = request.form
 
 	if not modelb or not form:
 		return None
@@ -3069,7 +3098,9 @@
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
-def addScr(request):
+def addScr(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
 	form = request.form
 
@@ -3109,9 +3140,10 @@
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
-def addSmb(request):
+def addSmb(request, form=None):
+	if form is None:
+		form = request.form
 	modelb = request.SESSION.get('model')
-	form = request.form
 
 	if not modelb or not form:
 		return None
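The recurring edit above gives every addXxx handler an optional form argument, so validateServiceAdd can hand it a dict synthesized from submitted XML instead of the live request form. A sketch of the pattern; FakeRequest and the table contents are illustrative, and resourceAddHandler mirrors the dispatch table the patch references but does not show:

    def addIp(request, form=None):
        # fall back to the live request form only when no dict is passed in
        if form is None:
            form = request.form
        # ... the real handler validates fields and builds the resource ...
        return form.get('ip_address')

    resourceAddHandler = {'ip': addIp}   # hypothetical table contents

    class FakeRequest:
        form = {'ip_address': '10.0.0.1'}

    assert addIp(FakeRequest()) == '10.0.0.1'
    assert resourceAddHandler['ip'](FakeRequest(),
        {'ip_address': '192.168.0.9'}) == '192.168.0.9'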



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-16  4:54 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-16  4:54 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-16 04:54:33

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	services tree construction stuff

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.112&r2=1.113

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 04:51:32	1.112
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 04:54:33	1.113
@@ -365,10 +365,29 @@
 
 	try:
 		doc = minidom.parseString(form_xml)
-		if not doc.firstChild:
+		forms = doc.getElementsByTagName('form')
+		if len(forms) < 1:
 			raise
 	except:
 		return (False, {'errors': ['The resource data submitted for this service is not properly formed.']})
+
+	form_hash = {}
+	form_hash['toplevel'] = {'form': None, 'kids': [] }
+	for i in forms:
+		form_id = i.getAttribute('id')
+		form_parent = i.getAttribute('parent')
+		if not form_id or not form_parent:
+			continue
+		ielems = i.getElementsByTagName('input')
+		if not ielems or len(ielems) < 1:
+			continue
+		if not form_id in form_hash:
+			form_hash[form_id] = {'form': i, 'kids': []}
+		elif not form_hash[form_id]['form']:
+			form_hash[form_id]['form'] = i
+		if not form_parent in form_hash:
+			form_hash[form_parent] = {'form': None, 'kids': []}
+		form_hash[form_parent]['kids'].append(form_id)
 		
 	return (True, {'messages': ['OK']})
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-16  4:51 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-16  4:51 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-16 04:51:32

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           storage_adapters.py 

Log message:
	small fixes

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.111&r2=1.112
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/storage_adapters.py.diff?cvsroot=cluster&r1=1.6&r2=1.7

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 04:26:19	1.111
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/16 04:51:32	1.112
@@ -2074,13 +2074,14 @@
 		#and send out new cluster.conf
 		delete_target = None
 		try:
-			nodelist = model.getClusterNodesPtr().getChildren()
+			nodelist = model.getNodes()
+			find_node = lower(nodename)
 			for n in nodelist:
-				if n.getName() == nodename:
+				if lower(n.getName()) == find_node:
 					delete_target = n
 					break
 		except:
-			return None
+			pass
 
 		if delete_target is None:
 			return None
@@ -2091,6 +2092,8 @@
 
 		# propagate the new cluster.conf via the second node
 		batch_number, result = setClusterConf(rc2, str(str_buf))
+		if batch_number is None:
+			return None
 
 		#Now we need to delete the node from the DB
 		path = str(CLUSTER_FOLDER_PATH + clustername)
--- conga/luci/site/luci/Extensions/storage_adapters.py	2006/10/15 22:34:54	1.6
+++ conga/luci/site/luci/Extensions/storage_adapters.py	2006/10/16 04:51:32	1.7
@@ -121,7 +121,7 @@
     ssys['Description'] = "Configure storage on " + system_data['hostname']
     
     if pagetype == STORAGE:
-      if stoname == system[0]:
+      if stoname == system_data['hostname']:
         ssys['currentItem'] = True
       else:
         ssys['currentItem'] = False



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-13 22:56 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-13 22:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-13 22:56:28

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	detect changes in cluster membership and deal with them accordingly, part 1

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.109&r2=1.110

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/13 21:25:14	1.109
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/13 22:56:28	1.110
@@ -2926,12 +2926,9 @@
 
 def resolveClusterChanges(self, clusterName, modelb):
 	try:
-		mb_nodes = dict.fromkeys(modelb.getNodes())
+		mb_nodes = modelb.getNodes()
 		if not mb_nodes or not len(mb_nodes):
 			raise
-		mb_map = {}
-		for i in iter(mb_nodes):
-			mb_map[i] = i
 	except:
 		return 'Unable to find cluster nodes for ' + clusterName
 
@@ -2943,16 +2940,48 @@
 		return 'Unable to find an entry for ' + clusterName + ' in the Luci database.'
 
 	try:
-		db_nodes = cluster_node.objectItems('Folder')
+		db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
 		if not db_nodes or not len(db_nodes):
 			raise
-		db_map = {}
-		for i in iter(db_nodes):
-			db_map[i[0]] = i[0]
 	except:
 		# Should we just create them all? Can this even happen?
 		return 'Unable to find database entries for any nodes in ' + clusterName
 
+	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
+
+	# this is a really great algorithm.
+	missing_list = list()
+	new_list = list()
+	for i in mb_nodes:
+		for j in db_nodes:
+			f = 0
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			new_list.append(i)
+
+	for i in db_nodes:
+		for j in mb_nodes:
+			f = 0
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			missing_list.append(i)
+
+	messages = list()
+	for i in missing_list:
+		cluster_node.delObjects([i])
+		messages.append('Node \"' + i + '\" is no longer in a member of cluster \"' + clusterName + '.\". It has been deleted from the management interface for this cluster.')
+
+	for i in new_list:
+		cluster_node.manage_addFolder(i, '__luci__:csystem:' + clusterName)
+		cluster_node.manage_addProperty('exceptions', 'auth', 'string')
+		messages.append('A new node, \"' + i + ',\" is now a member of cluster \"' + clusterName + '.\". It has added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.')
+	
+	return messages
+
 def addResource(self, request, ragent):
 	if not request.form:
 		return (False, {'errors': ['No form was submitted.']})
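The same_host comparison above deliberately treats a short host name and its fully qualified form as the same node when diffing the model's node list against the database; a bare prefix match is not enough. A few worked cases:

    same_host = lambda x, y: (x == y or
                              x[:len(y) + 1] == y + '.' or
                              y[:len(x) + 1] == x + '.')

    assert same_host('node1', 'node1')
    assert same_host('node1.example.com', 'node1')   # FQDN vs. short name
    assert same_host('node1', 'node1.example.com')
    assert not same_host('node10', 'node1')          # prefix alone fails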



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 22:11 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12 22:11 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 22:11:31

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fixups to busy stuff

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.107&r2=1.108

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 21:00:49	1.107
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 22:11:30	1.108
@@ -2258,6 +2258,7 @@
   ##check for error...if error, report and then remove flag.
   ##if no error, check if complete. If not complete, report status
   ##If complete, report status and remove flag.
+
   for item in items:
     tasktype = item[1].getProperty(TASKTYPE)
     if tasktype == CLUSTER_ADD or tasktype == NODE_ADD:
@@ -2265,13 +2266,17 @@
       node_report['isnodecreation'] = True
       node_report['iserror'] = False  #Default value
       node_report['desc'] = item[1].getProperty(FLAG_DESC) 
+      batch_xml = None
       ricci = item[0].split("____") #This removes the 'flag' suffix
       try:
         rc = RicciCommunicator(ricci[0])
+        batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
+        if batch_xml != None:
+          (creation_status, total) = batch_status(batch_xml)
       except:
         creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
+        batch_xml = "bloody_failure" #set to avoid next if statement
 
-      batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
       if batch_xml == None:  #The job is done and gone from queue
         if redirect_message == False: #We have not displayed this message yet
           node_report['desc'] = REDIRECT_MSG
@@ -2282,7 +2287,7 @@
         clusterfolder.manage_delObjects(item[0])
         continue
 
-      (creation_status, total) = batch_status(batch_xml)
+
 
       if creation_status < 0:  #an error was encountered
         if creation_status == RICCI_CONNECT_FAILURE:
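The fix above moves the batch_report call inside the try block, so a ricci connection failure can no longer dereference an unset communicator, and it assigns a throwaway sentinel so the "job done and gone from queue" branch is skipped on error. A self-contained sketch of that control flow; batch_status is stubbed here and the real one lives in ricci_bridge:

    RICCI_CONNECT_FAILURE = -1000   # from conga_constants.py

    def batch_status(batch_xml):
        # stub for the real ricci_bridge.batch_status
        return (0, 0)

    def creation_progress(rc, batch_id):
        batch_xml = None
        try:
            batch_xml = rc.batch_report(batch_id)
            if batch_xml is not None:
                creation_status, total = batch_status(batch_xml)
        except Exception:
            creation_status = RICCI_CONNECT_FAILURE
            batch_xml = 'bloody_failure'   # sentinel: not None, so the
                                           # "finished" branch is skipped
        if batch_xml is None:
            return 'finished'   # job is done and gone from the queue
        return creation_status

    class DeadRicci:
        """Stand-in for a RicciCommunicator whose host is unreachable."""
        def batch_report(self, batch_id):
            raise IOError('connection refused')

    assert creation_progress(DeadRicci(), 42) == RICCI_CONNECT_FAILURE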



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 21:00 kupcevic
  0 siblings, 0 replies; 185+ messages in thread
From: kupcevic @ 2006-10-12 21:00 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	kupcevic at sourceware.org	2006-10-12 21:00:49

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	luci: install_rpms - omissions

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.106&r2=1.107
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.23&r2=1.24

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 20:54:39	1.106
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 21:00:49	1.107
@@ -183,7 +183,8 @@
 					       True,
 					       True,
 					       enable_storage,
-					       False)
+					       False,
+					       rhn_dl)
 		if not batchNode:
 			nodeUnauth(nodeList)
 			cluster_properties['isComplete'] = False
@@ -314,7 +315,8 @@
 							True,
 							True,
 							enable_storage,
-							False)
+							False,
+							rhn_dl)
 			if not batchNode:
 				raise
 			del nodeList[i]
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/10 21:33:29	1.23
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/12 21:00:49	1.24
@@ -639,7 +639,8 @@
                         install_base,
                         install_services,
                         install_shared_storage,
-                        install_LVS):
+                        install_LVS,
+                        upgrade_rpms):
 	batch = '<?xml version="1.0" ?>'
 	batch += '<batch>'
         
@@ -647,8 +648,14 @@
 	batch += '<module name="rpm">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="install">'
+	batch += '<var name="upgrade" type="boolean" value="'
+        if upgrade_rpms:
+          batch += 'true'
+        else:
+          batch += 'false'
+        batch += '"/>'
 	batch += '<var name="sets" type="list_xml">'
-	if install_base:
+        if install_base or install_services or install_shared_storage:
           batch += '<set name="Cluster Base"/>'
         if install_services:
           batch += '<set name="Cluster Service Manager"/>'
@@ -714,7 +721,8 @@
                        install_base,
                        install_services,
                        install_shared_storage,
-                       install_LVS):
+                       install_LVS,
+                       upgrade_rpms):
     batch = '<?xml version="1.0" ?>'
     batch += '<batch>'
     
@@ -722,8 +730,14 @@
     batch += '<module name="rpm">'
     batch += '<request API_version="1.0">'
     batch += '<function_call name="install">'
+    batch += '<var name="upgrade" type="boolean" value="'
+    if upgrade_rpms:
+      batch += 'true'
+    else:
+      batch += 'false'
+    batch += '"/>'
     batch += '<var name="sets" type="list_xml">'
-    if install_base:
+    if install_base or install_services or install_shared_storage:
       batch += '<set name="Cluster Base"/>'
     if install_services:
       batch += '<set name="Cluster Service Manager"/>'
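For reference, a sketch assembling the rpm-install fragment with the new upgrade boolean rendered exactly as the patch writes it; the remaining package sets and closing tags are elided here, as they are in the archived hunk:

    def rpm_install_fragment(upgrade_rpms, install_base, install_services,
                             install_shared_storage):
        batch = '<module name="rpm">'
        batch += '<request API_version="1.0">'
        batch += '<function_call name="install">'
        batch += '<var name="upgrade" type="boolean" value="'
        if upgrade_rpms:
            batch += 'true'
        else:
            batch += 'false'
        batch += '"/>'
        batch += '<var name="sets" type="list_xml">'
        # Cluster Base is now pulled in by any of the three set choices
        if install_base or install_services or install_shared_storage:
            batch += '<set name="Cluster Base"/>'
        if install_services:
            batch += '<set name="Cluster Service Manager"/>'
        # remaining sets and closing tags elided, as in the archived hunk
        return batch

    frag = rpm_install_fragment(True, True, False, False)
    assert '<var name="upgrade" type="boolean" value="true"/>' in frag
    assert '<set name="Cluster Base"/>' in frag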



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 20:54 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12 20:54 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 20:54:40

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	forgot a global var

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.105&r2=1.106

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 20:48:48	1.105
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 20:54:39	1.106
@@ -2228,6 +2228,7 @@
   items = None
   map = {}
   isBusy = False
+  redirect_message = False
   nodereports = list()
   map['nodereports'] = nodereports
   cluname = req['clustername']



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 20:48 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12 20:48 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 20:48:48

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 

Log message:
	fixed busy flag logic error

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.104&r2=1.105
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.15&r2=1.16

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 19:40:44	1.104
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 20:48:48	1.105
@@ -2234,6 +2234,10 @@
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   items = clusterfolder.objectItems('ManagedSystem')
+  if len(items) == 0:
+    return map  #This returns an empty map, and should indicate not busy
+  else:
+    map['busy'] = "true"
   #Ok, here is what is going on...if there is an item,
   #we need to call the ricci_bridge and get a batch report.
   #This report will tell us one of three things:
@@ -2261,11 +2265,22 @@
       ricci = item[0].split("____") #This removes the 'flag' suffix
       try:
         rc = RicciCommunicator(ricci[0])
-        batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
-        (creation_status, total) = batch_status(batch_xml)
       except:
         creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
 
+      batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
+      if batch_xml == None:  #The job is done and gone from queue
+        if redirect_message == False: #We have not displayed this message yet
+          node_report['desc'] = REDIRECT_MSG
+          node_report['iserror'] = True 
+          node_report['errormessage'] = ""
+          nodereports.append(node_report)
+          redirect_message = True
+        clusterfolder.manage_delObjects(item[0])
+        continue
+
+      (creation_status, total) = batch_status(batch_xml)
+
       if creation_status < 0:  #an error was encountered
         if creation_status == RICCI_CONNECT_FAILURE:
           laststatus = item[1].getProperty(LAST_STATUS)
@@ -2307,7 +2322,8 @@
         continue
       else:  #either batch completed successfully, or still running
         if creation_status == total:  #finished...
-          node_report['statusmessage'] = "Node created successfully"
+          map['busy'] = "true"
+          node_report['statusmessage'] = "Node created successfully" + REDIRECT_MSG
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
           clusterfolder.manage_delObjects(item[0])
@@ -2331,7 +2347,7 @@
       rb = ricci_bridge(ricci[0])
       finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
       if finished == True:
-        node_report['desc'] = item[1].getProperty(FLAG_DESC)
+        node_report['desc'] = item[1].getProperty(FLAG_DESC) + REDIRECT_MSG
         nodereports.append(node_report)
         clusterfolder.manage_delObjects(item[0])
       else:
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/10/12 19:40:44	1.15
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/10/12 20:48:48	1.16
@@ -91,6 +91,8 @@
 
 POSSIBLE_REBOOT_MESSAGE="This node is not currently responding and is probably<br/>rebooting as planned. This state should persist for 5 minutes or so..."
 
+REDIRECT_MSG="  You will be redirected in 5 seconds. Please fasten your safety restraints."
+
 
 HOMEBASE_ADD_USER="1"
 HOMEBASE_ADD_SYSTEM="2"



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 19:40 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-12 19:40 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-12 19:40:44

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 
	                           homebase_adapters.py 

Log message:
	- save the cluster OS in the database so that we can compare it against nodes that are added after the cluster is initially deployed.
	- move some constants into the conga_constants.py file

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.103&r2=1.104
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.14&r2=1.15
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.28&r2=1.29

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 17:27:26	1.103
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 19:40:44	1.104
@@ -247,7 +247,7 @@
 		sessionData = None
 
 	if 'clusterName' in request.form:
-		clusterName = request.form['clusterName']
+		clusterName = str(request.form['clusterName'])
 	else:
 		return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
 
@@ -285,7 +285,24 @@
 			raise
 	except:
 		errors.append('You must specify at least one valid node to add to the cluster')
-		
+
+	try:
+		clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		cluster_os = clusterObj.manage_getProperty('cluster_os')
+		if not cluster_os:
+			raise Exception, 'no cluster OS was found.'
+		try:
+			if len(filter(lambda x: x['os'] != cluster_os, nodeList)) > 0:
+				raise Exception, 'different operating systems were detected.'
+		except:
+			nodeUnauth(nodeList)
+			cluster_properties['isComplete'] = False
+			errors.append('Cluster nodes must be running compatible operating systems.')
+	except:
+		nodeUnauth(nodeList)
+		cluster_properties['isComplete'] = False
+		errors.append('Unable to determine the cluster OS for the ' + clusterName + ' cluster.')
+
 	if not cluster_properties['isComplete']:
 		return (False, {'errors': errors, 'requestResults': cluster_properties})
 
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/10/12 15:45:49	1.14
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/10/12 19:40:44	1.15
@@ -90,3 +90,14 @@
 
 
 POSSIBLE_REBOOT_MESSAGE="This node is not currently responding and is probably<br/>rebooting as planned. This state should persist for 5 minutes or so..."
+
+
+HOMEBASE_ADD_USER="1"
+HOMEBASE_ADD_SYSTEM="2"
+HOMEBASE_PERMS="3"
+HOMEBASE_DEL_USER="4"
+HOMEBASE_DEL_SYSTEM="5"
+HOMEBASE_ADD_CLUSTER="6"
+HOMEBASE_ADD_CLUSTER_INITIAL="7"
+
+PLONE_ROOT='luci'
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/11 21:48:04	1.28
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/12 19:40:44	1.29
@@ -11,16 +11,7 @@
 from ricci_communicator import RicciCommunicator
 from ricci_communicator import CERTS_DIR_PATH
 from clusterOS import resolveOSType
-
-HOMEBASE_ADD_USER="1"
-HOMEBASE_ADD_SYSTEM="2"
-HOMEBASE_PERMS="3"
-HOMEBASE_DEL_USER="4"
-HOMEBASE_DEL_SYSTEM="5"
-HOMEBASE_ADD_CLUSTER="6"
-HOMEBASE_ADD_CLUSTER_INITIAL="7"
-
-PLONE_ROOT='luci'
+from conga_constants import *
 
 class InCluster(Exception):
 	pass
@@ -959,6 +950,19 @@
 			pass
 		return 'Unable to set permissions on new cluster \"' + clusterName + '\"-- Cluster creation failed'
 
+	# XXX this needs to be improved.
+	try:
+		cluster_os = nodeList[0]['os']
+		if not cluster_os:
+			raise KeyError, 'Cluster OS is blank'
+	except KeyError, e:
+		cluster_os = 'rhel5'
+
+	try:
+		newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
+	except:
+		pass # we were unable to set the OS property string on this cluster
+
 	for i in nodeList:
 		if 'ricci_host' in i:
 			host = str(i['ricci_host'])
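The compatibility check added to cluster_adapters.py above relies on the cluster_os property stored here: any node whose reported OS differs from the stored value fails validation. A sketch of that test, keeping the Python 2 filter semantics the patch itself uses:

    def os_mismatch(cluster_os, nodeList):
        # true when at least one node reports a different OS than the
        # one stored on the cluster object (Python 2 filter, as in the
        # patch; each node dict carries an 'os' key)
        return len(filter(lambda x: x['os'] != cluster_os, nodeList)) > 0

    assert os_mismatch('rhel5', [{'os': 'rhel5'}, {'os': 'rhel4'}])
    assert not os_mismatch('rhel5', [{'os': 'rhel5'}])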



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 17:27 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12 17:27 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 17:27:26

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	yikes - spelling error

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.102&r2=1.103

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 17:08:25	1.102
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 17:27:26	1.103
@@ -2253,17 +2253,17 @@
         if creation_status == RICCI_CONNECT_FAILURE:
           laststatus = item[1].getProperty(LAST_STATUS)
           if laststatus == INSTALL_TASK: #This means maybe node is rebooting
-            nodereport['statusindex'] = INSTALL_TASK
-            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + POSSIBLE_REBOOT_MESSAGE
+            node_report['statusindex'] = INSTALL_TASK
+            node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + POSSIBLE_REBOOT_MESSAGE
           elif laststatus == 0:
-            nodereport['statusindex'] = 0
-            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_INSTALL
+            node_report['statusindex'] = 0
+            node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_INSTALL
           elif laststatus == REBOOT_TASK:
-            nodereport['statusindex'] = REBOOT_TASK
-            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_CFG
+            node_report['statusindex'] = REBOOT_TASK
+            node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_CFG
           elif laststatus == SEND_CONF:
-            nodereport['statusindex'] = SEND_CONF
-            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_JOIN
+            node_report['statusindex'] = SEND_CONF
+            node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_JOIN
           nodereports.append(node_report)
           continue
         elif creation_status == -(INSTALL_TASK):



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 17:08 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12 17:08 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 17:08:25

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Trap connect failure

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.101&r2=1.102

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 15:50:56	1.101
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 17:08:25	1.102
@@ -2358,8 +2358,15 @@
     map['os'] = ""
     map['isVirtualized'] = False
     return map
+  
+  try:
+    rc = RicciCommunicator(ricci_agent)
+  except:
+    map = {}
+    map['os'] = ""
+    map['isVirtualized'] = False
+    return map
 
-  rc = RicciCommunicator(ricci_agent)
   map = {}
   os_str = resolveOSType(rc.os())
   map['os'] = os_str



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 15:50 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12 15:50 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 15:50:56

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Forgot colons - duh

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.100&r2=1.101

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 15:45:49	1.100
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 15:50:56	1.101
@@ -2258,10 +2258,10 @@
           elif laststatus == 0:
             nodereport['statusindex'] = 0
             nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_INSTALL
-          elif laststatus == REBOOT_TASK
+          elif laststatus == REBOOT_TASK:
             nodereport['statusindex'] = REBOOT_TASK
             nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_CFG
-          elif laststatus == SEND_CONF
+          elif laststatus == SEND_CONF:
             nodereport['statusindex'] = SEND_CONF
             nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_JOIN
           nodereports.append(node_report)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12 15:45 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12 15:45 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 15:45:49

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 

Log message:
	Fixed create progress hashmap

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.99&r2=1.100
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.13&r2=1.14

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 00:04:45	1.99
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 15:45:49	1.100
@@ -2254,9 +2254,18 @@
           laststatus = item[1].getProperty(LAST_STATUS)
           if laststatus == INSTALL_TASK: #This means maybe node is rebooting
             nodereport['statusindex'] = INSTALL_TASK
-            nodereport['statusmessage'] = POSSIBLE_REBOOT_MESSAGE
-            nodereports.append(node_report)
-            continue
+            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + POSSIBLE_REBOOT_MESSAGE
+          elif laststatus == 0:
+            nodereport['statusindex'] = 0
+            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_INSTALL
+          elif laststatus == REBOOT_TASK
+            nodereport['statusindex'] = REBOOT_TASK
+            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_CFG
+          elif laststatus == SEND_CONF
+            nodereport['statusindex'] = SEND_CONF
+            nodereport['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_JOIN
+          nodereports.append(node_report)
+          continue
         elif creation_status == -(INSTALL_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, INSTALL_TASK)
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/10/11 20:58:13	1.13
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/10/12 15:45:49	1.14
@@ -78,7 +78,15 @@
 START_NODE=4
 RICCI_CONNECT_FAILURE=(-1000)
 
+RICCI_CONNECT_FAILURE_MSG="A problem was encountered connecting with this node.  "
 #cluster/node create error messages
 CLUNODE_CREATE_ERRORS = ["An unknown error occurred when creating this node: ", "A problem occurred when installing packages: ","A problem occurred when rebooting this node: ", "A problem occurred when propagating the configuration to this node: ", "A problem occurred when starting this node: "]
 
+#cluster/node create error status messages
+PRE_INSTALL="The install state is not yet complete"
+PRE_REBOOT="Installation complete, but reboot not yet complete"
+PRE_CFG="Reboot stage successful, but configuration for the cluster is not yet distributed"
+PRE_JOIN="Packages are installed and configuration has been distributed, but the node has not yet joined the cluster."
+
+
 POSSIBLE_REBOOT_MESSAGE="This node is not currently responding and is probably<br/>rebooting as planned. This state should persist for 5 minutes or so..."



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-12  0:04 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-12  0:04 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-12 00:04:45

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	wrong way to del props

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.98&r2=1.99

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 23:56:17	1.98
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/12 00:04:45	1.99
@@ -2292,7 +2292,9 @@
           node_report['statusmessage'] = "Node still being created"
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
-          item[1].manage_delProperties(LAST_STATUS)
+          propslist = list()
+          propslist.append(LAST_STATUS)
+          item[1].manage_delProperties(propslist)
           item[1].manage_addProperty(LAST_STATUS,creation_status, "int")
           continue
           



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 23:56 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-11 23:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-11 23:56:18

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Property sheet bugfix

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.97&r2=1.98

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 23:11:12	1.97
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 23:56:17	1.98
@@ -2292,6 +2292,7 @@
           node_report['statusmessage'] = "Node still being created"
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
+          item[1].manage_delProperties(LAST_STATUS)
           item[1].manage_addProperty(LAST_STATUS,creation_status, "int")
           continue
           



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 23:11 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-11 23:11 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-11 23:11:13

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix conflicts left over from the resolver fix

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.96&r2=1.97

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 23:08:34	1.96
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 23:11:12	1.97
@@ -2347,20 +2347,12 @@
     map['isVirtualized'] = False
     return map
 
-  try:
-	  ricci_agent = resolve_nodename(self, clustername, ragent)
-  except:
-    map = {}
-    map['os'] = ""
-    map['isVirtualized'] = False
-    return map
-
-	rc = RicciCommunicator(ricci_agent)
-	map = {}
-	os_str = resolveOSType(rc.os())
-	map['os'] = os_str
-	map['isVirtualized'] = rc.dom0()
-	return map
+  rc = RicciCommunicator(ricci_agent)
+  map = {}
+  os_str = resolveOSType(rc.os())
+  map['os'] = os_str
+  map['isVirtualized'] = rc.dom0()
+  return map
 
 def getResourcesInfo(modelb, request):
 	resList = list()



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 23:08 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-11 23:08 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-11 23:08:34

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix for a very strange zope (bug?)

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.95&r2=1.96

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 22:37:28	1.95
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 23:08:34	1.96
@@ -216,15 +216,16 @@
 	return (True, {'errors': errors, 'messages': messages })
 
 def buildClusterCreateFlags(self, batch_map, clusterName):
-  path = CLUSTER_FOLDER_PATH + clusterName
+  path = str(CLUSTER_FOLDER_PATH + clusterName)
   clusterfolder = self.restrictedTraverse(path)
   for key in batch_map.keys():
+    key = str(key)
     id = batch_map[key]
     batch_id = str(id)
-    objname = key + "____flag" #This suffix needed to avoid name collision
+    objname = str(key + "____flag") #This suffix needed to avoid name collision
     clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #now designate this new object properly
-    objpath = path + "/" + objname
+    objpath = str(path + "/" + objname)
     flag = self.restrictedTraverse(objpath)
     #flag[BATCH_ID] = batch_id
     #flag[TASKTYPE] = CLUSTER_ADD
@@ -2328,15 +2329,23 @@
   return map
 
 def getClusterOS(self, ragent, request):
-	try:
-		clustername = request['clustername']
-	except KeyError, e:
-		try:
-			clustername = request.form['clustername']
-		except:
-			return {}
-	except:
-		return {}
+  try:
+    clustername = request['clustername']
+  except KeyError, e:
+    try:
+      clustername = request.form['clustername']
+    except:
+      return {}
+  except:
+    return {}
+
+  try:
+    ricci_agent = resolve_nodename(self, clustername, ragent)
+  except:
+    map = {}
+    map['os'] = ""
+    map['isVirtualized'] = False
+    return map
 
   try:
 	  ricci_agent = resolve_nodename(self, clustername, ragent)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 22:37 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-11 22:37 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-11 22:37:28

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	resolver fix

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.94&r2=1.95

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 20:58:13	1.94
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 22:37:28	1.95
@@ -2198,7 +2198,11 @@
 def getLogsForNode(self, request):
   nodename = request['nodename']
   clustername = request['clustername']
-  nodename_resolved = resolve_nodename(self, clustername, nodename)
+  try:
+    nodename_resolved = resolve_nodename(self, clustername, nodename)
+  except:
+    return "Unable to resolve node name %s to retrieve logging information" % nodename
+
   rb = ricci_bridge(nodename_resolved)
   return rb.getNodeLogs()
 
@@ -2334,7 +2338,14 @@
 	except:
 		return {}
 
-	ricci_agent = resolve_nodename(self, clustername, ragent)
+  try:
+	  ricci_agent = resolve_nodename(self, clustername, ragent)
+  except:
+    map = {}
+    map['os'] = ""
+    map['isVirtualized'] = False
+    return map
+
 	rc = RicciCommunicator(ricci_agent)
 	map = {}
 	os_str = resolveOSType(rc.os())
@@ -2921,12 +2932,12 @@
 def resolve_nodename(self, clustername, nodename):
   path = CLUSTER_FOLDER_PATH + clustername
   clusterfolder = self.restrictedTraverse(path)
-  objs = clusterfolder.objectItems()
+  objs = clusterfolder.objectItems('Folder')
   for obj in objs:
     if obj[0].find(nodename) != (-1):
       return obj[0]
 
-  return None
+  raise
 
 def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
   items = nodefolder.objectItems()



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 20:58 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-11 20:58 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-11 20:58:13

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 

Log message:
	added laststatus field to props

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.93&r2=1.94
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.12&r2=1.13

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 17:43:30	1.93
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 20:58:13	1.94
@@ -232,6 +232,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,CLUSTER_ADD, "string")
     flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
+    flag.manage_addProperty(LAST_STATUS, 0, "int")
 
 
 def validateAddClusterNode(self, request):
@@ -2236,11 +2237,22 @@
       node_report['iserror'] = False  #Default value
       node_report['desc'] = item[1].getProperty(FLAG_DESC) 
       ricci = item[0].split("____") #This removes the 'flag' suffix
-      rc = RicciCommunicator(ricci[0])
-      batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
-      (creation_status, total) = batch_status(batch_xml)
+      try:
+        rc = RicciCommunicator(ricci[0])
+        batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
+        (creation_status, total) = batch_status(batch_xml)
+      except:
+        creation_status = RICCI_CONNECT_FAILURE  #No contact with ricci (-1000)
+
       if creation_status < 0:  #an error was encountered
-        if creation_status == -(INSTALL_TASK):
+        if creation_status == RICCI_CONNECT_FAILURE:
+          laststatus = item[1].getProperty(LAST_STATUS)
+          if laststatus == INSTALL_TASK: #This means maybe node is rebooting
+            nodereport['statusindex'] = INSTALL_TASK
+            nodereport['statusmessage'] = POSSIBLE_REBOOT_MESSAGE
+            nodereports.append(node_report)
+            continue
+        elif creation_status == -(INSTALL_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, INSTALL_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[INSTALL_TASK] + err_msg
@@ -2275,7 +2287,7 @@
           node_report['statusmessage'] = "Node still being created"
           node_report['statusindex'] = creation_status
           nodereports.append(node_report)
-          clusterfolder.manage_delObjects(item[0])
+          item[1].manage_addProperty(LAST_STATUS,creation_status, "int")
           continue
           
     else:
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/10/10 21:26:01	1.12
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/10/11 20:58:13	1.13
@@ -58,6 +58,7 @@
 CLUNAME="clustername"
 BATCH_ID="batch_id"
 FLAG_DESC="flag_desc"
+LAST_STATUS="last_status"
 
 PATH_TO_PRIVKEY="/var/lib/luci/var/certs/privkey.pem"
 PATH_TO_CACERT="/var/lib/luci/var/certs/cacert.pem"
@@ -75,6 +76,9 @@
 REBOOT_TASK=2
 SEND_CONF=3
 START_NODE=4
+RICCI_CONNECT_FAILURE=(-1000)
 
 #cluster/node create error messages
 CLUNODE_CREATE_ERRORS = ["An unknown error occurred when creating this node: ", "A problem occurred when installing packages: ","A problem occurred when rebooting this node: ", "A problem occurred when propagating the configuration to this node: ", "A problem occurred when starting this node: "]
+
+POSSIBLE_REBOOT_MESSAGE="This node is not currently responding and is probably<br/>rebooting as planned. This state should persist for 5 minutes or so..."



^ permalink raw reply	[flat|nested] 185+ messages in thread
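
[Editor's note] Two details in the patch above are worth spelling out. First, the new RICCI_CONNECT_FAILURE branch reads a LAST_STATUS property from the flag object, and the still-running branch now persists the current status instead of deleting the flag, so a node that stops answering right after its install step can be treated as "probably rebooting" rather than failed. Second, the two nodereport[...] assignments inside that branch refer to a name that is never bound (the surrounding code uses node_report), so that path would raise NameError as committed. A minimal sketch of the intended retry logic, outside Zope:

RICCI_CONNECT_FAILURE = -1000   # from conga_constants.py above
INSTALL_TASK = 1

def check_node(flag, poll_batch):
    # flag is a plain dict standing in for the Zope flag object;
    # poll_batch is a hypothetical callable wrapping batch_report()
    # plus batch_status().
    try:
        status, total = poll_batch(flag['batch_id'])
    except Exception:
        status = RICCI_CONNECT_FAILURE   # no contact with ricci
    if status == RICCI_CONNECT_FAILURE:
        if flag.get('last_status') == INSTALL_TASK:
            return 'probably rebooting; keep waiting'
        return 'unreachable'
    if status < 0:
        return 'failed at step %d' % -status
    flag['last_status'] = status   # persist progress for the next poll
    return 'done' if status == total else 'in progress'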

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 17:43 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-11 17:43 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-11 17:43:30

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	added other case in fence info method.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.92&r2=1.93

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 17:29:46	1.92
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 17:43:30	1.93
@@ -2097,14 +2097,25 @@
   return resultlist
 
 def getFenceInfo(self, model, request=None):
-  map = list() 
+  map = {}
+  fencedevs = list() 
   level1 = list()
   level2 = list()
-  map.append(level1)
-  map.append(level2)
+  map['level1'] = level1
+  map['level2'] = level2
+  map['fencedevs'] = fencedevs
   nodename = ""
   if request == None:  #this is being called by the fence device page
-    pass
+    #Get list of fence devices
+    fds = model.getFenceDevices()
+    for fd in fds:
+      #create fencedev hashmap
+      if fd.isShared() == True:
+        fencedev = fd.getAttributes()
+        fencedevs.append(fencedev)
+      
+    return map
+
   else:
     try:
       nodename = request['nodename']
@@ -2174,12 +2185,14 @@
           else:  #Not a shareable fence device type
             for kee in kees:
               fence_struct[kee] = fi_attrs[kee]
-      map[i].append(fence_struct)      
+        if i == 0:
+          level1.append(fence_struct)      
+        else:
+          level2.append(fence_struct)      
 
     return map    
       
 
-      
 
 def getLogsForNode(self, request):
   nodename = request['nodename']



^ permalink raw reply	[flat|nested] 185+ messages in thread
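
[Editor's note] The rewrite above changes getFenceInfo's return value from a two-element list to a dict keyed 'level1', 'level2' and 'fencedevs', and fills in the previously empty request == None branch so the fence-device page can list shared devices. A condensed sketch of the new shape; model is assumed to expose getFenceDevices(), whose entries carry isShared() and getAttributes() as in the FenceDevice changes later in this thread:

def get_fence_info(model, request=None):
    info = {'level1': [], 'level2': [], 'fencedevs': []}
    if request is None:
        # fence-device page: only the shared devices are wanted
        info['fencedevs'] = [fd.getAttributes()
                             for fd in model.getFenceDevices()
                             if fd.isShared()]
        return info
    # per-node path: fill level1/level2 as in the diff above
    return info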

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 17:29 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-11 17:29 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-11 17:29:47

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 

Log message:
	fix a couple of cluster deploy errors

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.91&r2=1.92
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.25&r2=1.26

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 16:35:19	1.91
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 17:29:46	1.92
@@ -159,19 +159,21 @@
 	try:
 		cluster_os = nodeList[0]['os']
 		if not cluster_os:
-			raise
-		if len(filter(lambda x: x != cluster_os, nodeList[1:])) > 0:
-			raise
+			raise KeyError('OS for ' + nodeList[0]['host'] + ' is blank')
 	except KeyError, e:
-		errors.append('Unable to identify the operating system running on the first cluster node.')
-		cluster_properties['isComplete'] = False
-	except:
-		errors.append('Cluster nodes must be running compatible operating systems.')
 		cluster_properties['isComplete'] = False
+		errors.append('Unable to identify the operating system running on the first cluster node: ' + str(e))
 
 	if cluster_properties['isComplete'] != True:
 		nodeUnauth(nodeList)
 		return (False, {'errors': errors, 'requestResults':cluster_properties })
+	else:
+		try:
+			if len(filter(lambda x: x['os'] != cluster_os, nodeList[1:])) > 0:
+				raise Exception('different operating systems were detected.')
+		except:
+			cluster_properties['isComplete'] = False
+			errors.append('Cluster nodes must be running compatible operating systems.')
 
 	if cluster_properties['isComplete'] == True:
 		batchNode = createClusterBatch(cluster_os,
@@ -2223,7 +2225,7 @@
       ricci = item[0].split("____") #This removes the 'flag' suffix
       rc = RicciCommunicator(ricci[0])
       batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
-      (creation_status, total) = rc.batch_status(batch_xml)
+      (creation_status, total) = batch_status(batch_xml)
       if creation_status < 0:  #an error was encountered
         if creation_status == -(INSTALL_TASK):
           node_report['iserror'] = True
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/11 16:18:58	1.25
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/11 17:29:46	1.26
@@ -148,6 +148,7 @@
 
 def nodeAuth(cluster, host, passwd):
 	systemName = host
+	os_str = 'rhel5'
 
 	try:
 		rc = RicciCommunicator(host)
@@ -156,15 +157,14 @@
 		systemName = rc.system_name()
 	except:
 		error = 'Unable to establish a connection to the ricci agent on \"' + host + '\"'
-		return { 'host': host, 'ricci_host': systemName, 'errors': error, 'cur_auth': False, 'os': None }
-
+		return { 'host': host, 'ricci_host': systemName, 'errors': error, 'cur_auth': False, 'os': os_str }
 
 	if rc.authed():
 		prevAuth = True
 	else:
 		prevAuth = False
 		if not passwd:
-			return { 'host': host, 'ricci_host': systemName, 'prev_auth': False, 'cur_auth': False, 'os': None}
+			return { 'host': host, 'ricci_host': systemName, 'prev_auth': False, 'cur_auth': False, 'os': os_str }
 		else:
 			try:
 				rc.auth(passwd)
@@ -190,7 +190,7 @@
 		return node
 
 	error = 'Unable to authenticate to the ricci agent on \"' + host + '\"'
-	return { 'host': host, 'ricci_host': systemName, 'prev_auth': False , 'cur_auth': False, 'errors': error, 'os': None }
+	return { 'host': host, 'ricci_host': systemName, 'prev_auth': False , 'cur_auth': False, 'errors': error, 'os': os_str }
 
 def validateAddClusterInitial(self, request):
 	errors = list()
@@ -225,6 +225,10 @@
 	except:
 		cluster_info = None
 
+	os_str = resolveOSType(rc.os())
+	if os_str == None:
+		os_str = "rhel5"  #Backup plan in case all is almost lost...
+
 	if not cluster_info or not cluster_info[0]:
 		if not prevAuth:
 			rc.unauth()
@@ -239,7 +243,7 @@
 	if cluConf:
 		nodeList = getClusterConfNodes(cluConf)
 
-	if not cluConf or not nodeList or len(nodeList) < 2:
+	if not cluConf or not nodeList or len(nodeList) < 1:
 		if not prevAuth:
 			rc.unauth()
 		return (False, { 'errors': [ 'Error retrieving member nodes for cluster \"' + clusterName + '\"' ] })
@@ -248,7 +252,7 @@
 	if systemName[:9] == 'localhost':
 		systemName = sysData[0]
 
-	node = { 'host': sysData[0], 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': rc.authed() }
+	node = { 'host': sysData[0], 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': rc.authed(), 'os': os_str }
 	nodeHash[sysData[0]] = node
 	rnodeHash[systemName] = node
 	newNodeList.append(node)



^ permalink raw reply	[flat|nested] 185+ messages in thread
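
[Editor's note] The common thread in the homebase_adapters.py hunks above is that every return path of nodeAuth() now reports an 'os' guess instead of None, with 'rhel5' as the fallback when ricci cannot be asked; the member-count check is also relaxed from two nodes to one, so single-node clusters can be imported. A sketch of the defaulting pattern (resolveOSType is the helper introduced further down this thread; rc stands for a RicciCommunicator):

def guess_os(rc):
    try:
        os_str = resolveOSType(rc.os())
    except Exception:
        os_str = None
    return os_str or 'rhel5'   # backup plan in case all is almost lost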

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 16:35 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-11 16:35 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-11 16:35:19

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Label change

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.90&r2=1.91

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 16:24:59	1.90
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 16:35:19	1.91
@@ -1023,7 +1023,7 @@
   fd['children'] = kids
 #############################################################
   fen = {}
-  fen['Title'] = "Fence Devices"
+  fen['Title'] = "Shared Fence Devices"
   fen['cfg_type'] = "fencedevicess"
   fen['absolute_url'] = url + "?pagetype=" + FENCEDEVS + "&clustername=" + cluname
   fen['Description'] = "Fence Device configuration for this cluster"



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 16:25 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-11 16:25 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-11 16:24:59

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fixes for cluster and node creation busywait page

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.89&r2=1.90

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 16:18:58	1.89
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 16:24:59	1.90
@@ -2128,7 +2128,7 @@
         level = levels[i]
       else:
         #No more levels...
-        return map
+        continue
       kids = level.getChildren()
       if len(kids) == 0:
         continue
@@ -2189,6 +2189,9 @@
 def isClusterBusy(self, req):
   items = None
   map = {}
+  isBusy = False
+  nodereports = list()
+  map['nodereports'] = nodereports
   cluname = req['clustername']
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
@@ -2200,32 +2203,97 @@
   ##normal page
   ##2) The batch task is NOT done, so meta refresh in 5 secs and try again
   ##3) The ricci agent has no recollection of the task, so handle like 1 above
+  ###
+  ##Here is what we have to do:
+  ##the map should have two lists:
+  ##One list of non-cluster create tasks
+  ##and one of cluster create task structs
+  ##For each item in items, check if this is a cluster create tasktype
+  ##If so, call RC, and then call stan's batch report method
+  ##check for error...if error, report and then remove flag.
+  ##if no error, check if complete. If not complete, report status
+  ##If complete, report status and remove flag.
   for item in items:
-    #Check here for more than 1 entry (an error)
-    ricci = item[0].split("____") #This removes the 'flag' suffix
-    rb = ricci_bridge(ricci[0])
-    finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
-    if finished == True:
-      clusterfolder.manage_delObjects(item[0])
-      map['refreshurl'] = '5; url=\".\"'
-      map['desc'] = item[1].getProperty(FLAG_DESC)
-      return map
+    tasktype = item[1].getProperty(TASKTYPE)
+    if tasktype == CLUSTER_ADD or tasktype == NODE_ADD:
+      node_report = {}
+      node_report['isnodecreation'] = True
+      node_report['iserror'] = False  #Default value
+      node_report['desc'] = item[1].getProperty(FLAG_DESC) 
+      ricci = item[0].split("____") #This removes the 'flag' suffix
+      rc = RicciCommunicator(ricci[0])
+      batch_xml = rc.batch_report(item[1].getProperty(BATCH_ID))
+      (creation_status, total) = rc.batch_status(batch_xml)
+      if creation_status < 0:  #an error was encountered
+        if creation_status == -(INSTALL_TASK):
+          node_report['iserror'] = True
+          (err_code, err_msg) = extract_module_status(batch_xml, INSTALL_TASK)
+          node_report['errormessage'] = CLUNODE_CREATE_ERRORS[INSTALL_TASK] + err_msg
+        elif creation_status == -(REBOOT_TASK):
+          node_report['iserror'] = True
+          (err_code, err_msg) = extract_module_status(batch_xml, REBOOT_TASK)
+          node_report['errormessage'] = CLUNODE_CREATE_ERRORS[REBOOT_TASK] + err_msg
+        elif creation_status == -(SEND_CONF):
+          node_report['iserror'] = True
+          (err_code, err_msg) = extract_module_status(batch_xml, SEND_CONF)
+          node_report['errormessage'] = CLUNODE_CREATE_ERRORS[SEND_CONF] + err_msg
+        elif creation_status == -(START_NODE):
+          node_report['iserror'] = True
+          (err_code, err_msg) = extract_module_status(batch_xml, START_NODE)
+          node_report['errormessage'] = CLUNODE_CREATE_ERRORS[START_NODE]
+        else:
+          node_report['iserror'] = True
+          node_report['errormessage'] = CLUNODE_CREATE_ERRORS[0]
+        clusterfolder.manage_delObjects(item[0])
+        nodereports.append(node_report)
+        continue
+      else:  #either batch completed successfully, or still running
+        if creation_status == total:  #finished...
+          node_report['statusmessage'] = "Node created successfully"
+          node_report['statusindex'] = creation_status
+          nodereports.append(node_report)
+          clusterfolder.manage_delObjects(item[0])
+          continue
+        else:
+          map['busy'] = "true"
+          isBusy = True
+          node_report['statusmessage'] = "Node still being created"
+          node_report['statusindex'] = creation_status
+          nodereports.append(node_report)
+          clusterfolder.manage_delObjects(item[0])
+          continue
+          
     else:
-      map['busy'] = "true"
-      part1 = req['ACTUAL_URL']
-      part2 = req['QUERY_STRING']
-      dex = part2.find("&busyfirst")
-      if dex != (-1):
-        tmpstr = part2[:dex] #This strips off busyfirst var
-        part2 = tmpstr
-        ###FIXME - The above assumes that the 'busyfirst' query var is at the
-        ###end of the URL...
-      wholeurl = part1 + "?" + part2
-      #map['url'] = "5, url=" + req['ACTUAL_URL'] + "?" + req['QUERY_STRING']
-      map['refreshurl'] = "5; url=" + wholeurl
-      map['desc'] = item[1].getProperty(FLAG_DESC)
-      req['specialpagetype'] = "1"
-      return map
+      node_report = {}
+      node_report['isnodecreation'] = False
+      ricci = item[0].split("____") #This removes the 'flag' suffix
+      rb = ricci_bridge(ricci[0])
+      finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
+      if finished == True:
+        node_report['desc'] = item[1].getProperty(FLAG_DESC)
+        nodereports.append(node_report)
+        clusterfolder.manage_delObjects(item[0])
+      else:
+        node_report = {}
+        map['busy'] = "true"
+        isBusy = True
+        node_report['desc'] = item[1].getProperty(FLAG_DESC)
+        nodereports.append(node_report)
+  if isBusy:
+    part1 = req['ACTUAL_URL']
+    part2 = req['QUERY_STRING']
+    dex = part2.find("&busyfirst")
+    if dex != (-1):
+      tmpstr = part2[:dex] #This strips off busyfirst var
+      part2 = tmpstr
+      ###FIXME - The above assumes that the 'busyfirst' query var is at the
+      ###end of the URL...
+    wholeurl = part1 + "?" + part2
+    #map['url'] = "5, url=" + req['ACTUAL_URL'] + "?" + req['QUERY_STRING']
+    map['refreshurl'] = "5; url=" + wholeurl
+    req['specialpagetype'] = "1"
+  else:
+    map['refreshurl'] = '5; url=\".\"'
   return map
 
 def getClusterOS(self, ragent, request):



^ permalink raw reply	[flat|nested] 185+ messages in thread
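
[Editor's note] The isClusterBusy rewrite above keeps polling the flag objects, collects a node_report per flag, and, when any batch is still running, emits a meta-refresh URL with the busyfirst variable stripped off. The FIXME in the hunk notes that the find()/slice approach assumes busyfirst is the last query variable; parsing the query string removes that assumption. A small sketch, not the committed code:

try:
    from urllib.parse import parse_qsl, urlencode   # Python 3
except ImportError:
    from urlparse import parse_qsl                  # Python 2.6+
    from urllib import urlencode

def refresh_url(actual_url, query_string):
    # drop busyfirst wherever it appears, keep everything else
    params = [(k, v) for k, v in parse_qsl(query_string)
              if k != 'busyfirst']
    return '5; url=' + actual_url + '?' + urlencode(params)

print(refresh_url('/luci/cluster', 'clustername=c1&pagetype=3&busyfirst=true'))
# -> 5; url=/luci/cluster?clustername=c1&pagetype=3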

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-11 16:18 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-11 16:18 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-11 16:18:58

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 

Log message:
	- robustness fixes for homebase
	- commit create cluster/add node backend param handler code

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.88&r2=1.89
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.24&r2=1.25

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/10 21:33:29	1.88
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/11 16:18:58	1.89
@@ -132,6 +132,22 @@
 	errors.extend(ret[0])
 	cluster_properties = ret[1]
 
+	rhn_dl = 1
+	try:
+		rhn_dls = request.form['rhn_dl'].strip().lower()
+		if rhn_dls != '1' and rhn_dls != 'true':
+			rhn_dl = 0
+	except:
+		rhn_dl = 0
+
+	enable_storage = 0
+	try:
+		enable_storage_str = request.form['enable_storage'].strip().lower()
+		if enable_storage_str:
+			enable_storage = 1
+	except:
+		enable_storage = 0
+
 	try:
 		nodeList = cluster_properties['nodeList']
 		if len(nodeList) < 1:
@@ -164,7 +180,7 @@
 					       map(lambda x: x['ricci_host'], nodeList),
 					       True,
 					       True,
-					       False,
+					       enable_storage,
 					       False)
 		if not batchNode:
 			nodeUnauth(nodeList)
@@ -231,6 +247,22 @@
 	else:
 		return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
 
+	rhn_dl = 1
+	try:
+		rhn_dls = request.form['rhn_dl'].strip().lower()
+		if rhn_dls != '1' and rhn_dls != 'true':
+			rhn_dl = 0
+	except:
+		rhn_dl = 0
+
+	enable_storage = 0
+	try:
+		enable_storages = request.form['enable_storage'].strip().lower()
+		if enable_storages:
+			enable_storage = 1
+	except:
+		enable_storage = 0
+
 	try:
 		numStorage = int(request.form['numStorage'])
 		if numStorage < 1:
@@ -260,7 +292,7 @@
 			batchNode = addClusterNodeBatch(clusterName,
 							True,
 							True,
-							False,
+							enable_storage,
 							False)
 			if not batchNode:
 				raise
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/06 20:45:26	1.24
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/10/11 16:18:58	1.25
@@ -450,7 +450,7 @@
 
 	userId = user.getUserId()
 
-	clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')()
+	clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
 	if not '__CLUSTER' in request.form:
 		for i in clusters:
 			try:
@@ -491,7 +491,7 @@
 				except:
 						errors.append('Failed to remove permission for ' + userId + ' for cluster ' + i[0])
 
-	storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')()
+	storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
 	if not '__SYSTEM' in request.form:
 		for i in storage:
 			try:
@@ -794,7 +794,7 @@
 
 def getClusterSystems(self, clusterName):
 	if isAdmin(self):
-		return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')()
+		return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
 
 	try:
 		i = getSecurityManager().getUser()
@@ -803,7 +803,7 @@
 	except:
 		return None
 
-	csystems = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')()
+	csystems = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
 	if not csystems:
 		return None
 
@@ -815,7 +815,7 @@
 
 def getClusters(self):
 	if isAdmin(self):
-		return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')()
+		return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
 	try:
 		i = getSecurityManager().getUser()
 		if not i:
@@ -823,7 +823,7 @@
 	except:
 		return None
 
-	clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')()
+	clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
 	if not clusters:
 		return None
 
@@ -836,7 +836,7 @@
 
 def getStorage(self):
 	if isAdmin(self):
-		return self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')()
+		return self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
 	try:
 		i = getSecurityManager().getUser()
 		if not i:
@@ -844,7 +844,7 @@
 	except:
 		return None
 
-	storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')()
+	storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
 	if not storage:
 		return None
 
@@ -1146,8 +1146,8 @@
 		perms[userName]['cluster'] = {}
 		perms[userName]['storage'] = {}
 
-		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')()
-		storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')()
+		clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/objectItems')('Folder')
+		storage = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/objectItems')('Folder')
 
 		for c in clusters:
 			perms[userName]['cluster'][c[0]] = i.has_role('View', c[1])



^ permalink raw reply	[flat|nested] 185+ messages in thread
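
[Editor's note] Most of the homebase_adapters.py churn above is one repeated change: objectItems() becomes objectItems('Folder'), so the permission loops iterate only over Folder children instead of every contained object. In plain Python the filter amounts to something like this, with container a hypothetical list of (id, object) pairs:

def folder_items(container):
    # stands in for container.objectItems('Folder') in Zope, which
    # filters contained objects by their meta_type
    return [(oid, obj) for oid, obj in container
            if getattr(obj, 'meta_type', None) == 'Folder']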

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-10 21:33 kupcevic
  0 siblings, 0 replies; 185+ messages in thread
From: kupcevic @ 2006-10-10 21:33 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	kupcevic at sourceware.org	2006-10-10 21:33:29

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	luci: backend support for selection of what to install during deployment

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.87&r2=1.88
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.22&r2=1.23

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/09 20:21:58	1.87
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/10 21:33:29	1.88
@@ -158,7 +158,14 @@
 		return (False, {'errors': errors, 'requestResults':cluster_properties })
 
 	if cluster_properties['isComplete'] == True:
-		batchNode = createClusterBatch(cluster_os, clusterName, clusterName, map(lambda x: x['ricci_host'], nodeList), True, False, False)
+		batchNode = createClusterBatch(cluster_os,
+					       clusterName,
+					       clusterName,
+					       map(lambda x: x['ricci_host'], nodeList),
+					       True,
+					       True,
+					       False,
+					       False)
 		if not batchNode:
 			nodeUnauth(nodeList)
 			cluster_properties['isComplete'] = False
@@ -250,7 +257,11 @@
 	while i < len(nodeList):
 		clunode = nodeList[i]
 		try:
-			batchNode = addClusterNodeBatch(clusterName, True, False, False)
+			batchNode = addClusterNodeBatch(clusterName,
+							True,
+							True,
+							False,
+							False)
 			if not batchNode:
 				raise
 			del nodeList[i]
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/09 20:17:29	1.22
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/10 21:33:29	1.23
@@ -635,31 +635,49 @@
     #parse out log entry  
     return payload
 
-def addClusterNodeBatch(cluster_name, services, shared_storage, LVS):
+def addClusterNodeBatch(cluster_name,
+                        install_base,
+                        install_services,
+                        install_shared_storage,
+                        install_LVS):
 	batch = '<?xml version="1.0" ?>'
 	batch += '<batch>'
+        
+        
 	batch += '<module name="rpm">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="install">'
 	batch += '<var name="sets" type="list_xml">'
-	batch += '<set name="Cluster Base"/>'
-	if services:
-		batch += '<set name="Cluster Service Manager"/>'
-	if shared_storage:
-		batch += '<set name="Clustered Storage"/>'
-	if LVS:
-		batch += '<set name="Linux Virtual Server"/>'
+	if install_base:
+          batch += '<set name="Cluster Base"/>'
+        if install_services:
+          batch += '<set name="Cluster Service Manager"/>'
+	if install_shared_storage:
+          batch += '<set name="Clustered Storage"/>'
+	if install_LVS:
+          batch += '<set name="Linux Virtual Server"/>'
 	batch += '</var>'
 	batch += '</function_call>'
 	batch += '</request>'
 	batch += '</module>'
-
-	batch += '<module name="reboot">'
-	batch += '<request API_version="1.0">'
-	batch += '<function_call name="reboot_now"/>'
-	batch += '</request>'
-	batch += '</module>'
-
+        
+        
+	need_reboot = install_base or install_services or install_shared_storage or install_LVS
+        if need_reboot:
+          batch += '<module name="reboot">'
+          batch += '<request API_version="1.0">'
+          batch += '<function_call name="reboot_now"/>'
+          batch += '</request>'
+          batch += '</module>'
+        else:
+          # need placeholder instead of reboot
+          batch += '<module name="rpm">'
+          batch += '<request API_version="1.0">'
+          batch += '<function_call name="install"/>'
+          batch += '</request>'
+          batch += '</module>'
+        
+        
 	batch += '<module name="cluster">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="set_cluster.conf">'
@@ -676,45 +694,65 @@
 	batch += '</function_call>'
 	batch += '</request>'
 	batch += '</module>'
-
+        
+        
 	batch += '<module name="cluster">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="start_node"/>'
 	batch += '</request>'
 	batch += '</module>'
+        
+        
 	batch += '</batch>'
 
 	return minidom.parseString(batch).firstChild
 
-def createClusterBatch(os_str, cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
+def createClusterBatch(os_str,
+                       cluster_name,
+                       cluster_alias,
+                       nodeList,
+                       install_base,
+                       install_services,
+                       install_shared_storage,
+                       install_LVS):
     batch = '<?xml version="1.0" ?>'
     batch += '<batch>'
+    
+    
     batch += '<module name="rpm">'
     batch += '<request API_version="1.0">'
     batch += '<function_call name="install">'
     batch += '<var name="sets" type="list_xml">'
-    batch += '<set name="Cluster Base"/>'
-
-    if services:
-        batch += '<set name="Cluster Service Manager"/>'
-
-    if shared_storage:
-        batch += '<set name="Clustered Storage"/>'
-
-    if LVS:
-        batch += '<set name="Linux Virtual Server"/>'
-
+    if install_base:
+      batch += '<set name="Cluster Base"/>'
+    if install_services:
+      batch += '<set name="Cluster Service Manager"/>'
+    if install_shared_storage:
+      batch += '<set name="Clustered Storage"/>'
+    if install_LVS:
+      batch += '<set name="Linux Virtual Server"/>'
     batch += '</var>'
     batch += '</function_call>'
     batch += '</request>'
     batch += '</module>'
     
-    batch += '<module name="reboot">'
-    batch += '<request API_version="1.0">'
-    batch += '<function_call name="reboot_now"/>'
-    batch += '</request>'
-    batch += '</module>'
     
+    need_reboot = install_base or install_services or install_shared_storage or install_LVS
+    if need_reboot:
+      batch += '<module name="reboot">'
+      batch += '<request API_version="1.0">'
+      batch += '<function_call name="reboot_now"/>'
+      batch += '</request>'
+      batch += '</module>'
+    else:
+      # need placeholder instead of reboot
+      batch += '<module name="rpm">'
+      batch += '<request API_version="1.0">'
+      batch += '<function_call name="install"/>'
+      batch += '</request>'
+      batch += '</module>'
+    
+      
     batch += '<module name="cluster">'
     batch += '<request API_version="1.0">'
     batch += '<function_call name="set_cluster.conf">'
@@ -747,6 +785,7 @@
     batch += '</request>'
     batch += '</module>'
     
+    
     batch += '<module name="cluster">'
     batch += '<request API_version="1.0">'
     batch += '<function_call name="start_node">'
@@ -754,7 +793,9 @@
     batch += '</function_call>'
     batch += '</request>'
     batch += '</module>'
+    
+    
     batch += '</batch>'
-
+    
     return minidom.parseString(batch).firstChild
 



^ permalink raw reply	[flat|nested] 185+ messages in thread
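
[Editor's note] The reworked batch builders above take four independent install flags and, when none of them requires a reboot, insert a no-op rpm module where the reboot module would have been. The placeholder matters because the status code elsewhere in this thread addresses batch modules by position (INSTALL_TASK, REBOOT_TASK, SEND_CONF, START_NODE), so the module count and order must stay fixed. A compact sketch of that one decision:

from xml.dom import minidom

def reboot_or_placeholder(need_reboot):
    if need_reboot:
        name, call = 'reboot', '<function_call name="reboot_now"/>'
    else:
        # no-op placeholder keeps later modules at the same index
        name, call = 'rpm', '<function_call name="install"/>'
    return ('<module name="%s"><request API_version="1.0">%s'
            '</request></module>' % (name, call))

batch = '<?xml version="1.0" ?><batch>%s</batch>' % reboot_or_placeholder(False)
print(minidom.parseString(batch).firstChild.toxml())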

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-09 20:21 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-09 20:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-09 20:21:58

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	another typo

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.86&r2=1.87

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/09 17:12:28	1.86
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/09 20:21:58	1.87
@@ -293,7 +293,7 @@
 		if not form_xml:
 			raise KeyError('form_xml must not be blank')
 	except KeyError, e:
-		return (False, {errors: ['No resource data was supplied for this service.']})
+		return (False, {'errors': ['No resource data was supplied for this service.']})
 
 	try:
 		doc = minidom.parseString(form_xml)



^ permalink raw reply	[flat|nested] 185+ messages in thread
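
[Editor's note] The one-character fix above is easy to misread: without quotes, errors in {errors: [...]} is a name lookup, not a string key. In these validators errors is typically bound to a list, so the unquoted form would raise TypeError: unhashable type (or NameError if the name were unbound) instead of building the intended response. For illustration:

errors = 'whatever errors happens to name'
bad = {errors: ['msg']}      # keyed by the value of errors, not 'errors'
good = {'errors': ['msg']}   # keyed by the literal string
print('errors' in bad, 'errors' in good)   # False True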

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-04 16:20 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-04 16:20 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-04 16:20:22

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	python punctuation pedantry

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.82&r2=1.83

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/04 16:05:43	1.82
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/04 16:20:22	1.83
@@ -139,10 +139,10 @@
 
 	if cluster_properties['isComplete'] == True:
 		from ricci_communicator import RicciCommunicator
-    rc = RicciCommunicator(nodeList[0]['ricci_host'])
-    os_str = resolveOSType(rc.os())
-    if os_str == None:
-      os_str = "rhel5"  #Backup plan in case all is almost lost...
+		rc = RicciCommunicator(nodeList[0]['ricci_host'])
+		os_str = resolveOSType(rc.os())
+		if os_str == None:
+			os_str = "rhel5"  #Backup plan in case all is almost lost...
 
 		batchNode = createClusterBatch(os_str, clusterName, clusterName, map(lambda x: x['ricci_host'], nodeList), True, False, False)
 		if not batchNode:
@@ -159,7 +159,7 @@
 			return (False, {'errors': errors, 'requestResults':cluster_properties })
 
 		batch_id_map = {}
-    rc = None
+		rc = None
 		for i in nodeList:
 			try:
 				rc = RicciCommunicator(i['ricci_host'])
@@ -2169,7 +2169,7 @@
 	ricci_agent = resolve_nodename(self, clustername, ragent)
 	rc = RicciCommunicator(ricci_agent)
 	map = {}
-  os_str = resolveOSType(rc.os())
+	os_str = resolveOSType(rc.os())
 	map['os'] = os_str
 	map['isVirtualized'] = rc.dom0()
 	return map



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-04 16:05 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-04 16:05 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-04 16:05:43

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	os version schtuff

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.81&r2=1.82
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.20&r2=1.21

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/04 15:11:10	1.81
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/04 16:05:43	1.82
@@ -139,8 +139,12 @@
 
 	if cluster_properties['isComplete'] == True:
 		from ricci_communicator import RicciCommunicator
+    rc = RicciCommunicator(nodeList[0]['ricci_host'])
+    os_str = resolveOSType(rc.os())
+    if os_str == None:
+      os_str = "rhel5"  #Backup plan in case all is almost lost...
 
-		batchNode = createClusterBatch(clusterName, clusterName, map(lambda x: x['ricci_host'], nodeList), True, False, False)
+		batchNode = createClusterBatch(os_str, clusterName, clusterName, map(lambda x: x['ricci_host'], nodeList), True, False, False)
 		if not batchNode:
 			nodeUnauth(nodeList)
 			cluster_properties['isComplete'] = False
@@ -155,6 +159,7 @@
 			return (False, {'errors': errors, 'requestResults':cluster_properties })
 
 		batch_id_map = {}
+    rc = None
 		for i in nodeList:
 			try:
 				rc = RicciCommunicator(i['ricci_host'])
@@ -2164,10 +2169,17 @@
 	ricci_agent = resolve_nodename(self, clustername, ragent)
 	rc = RicciCommunicator(ricci_agent)
 	map = {}
-	map['os'] = rc.os()
+  os_str = resolveOSType(rc.os())
+	map['os'] = os_str
 	map['isVirtualized'] = rc.dom0()
 	return map
 
+def resolveOSType(os):
+  if os.find("Tikanga") != (-1) or os.find("FC6") != (-1):
+    return "rhel5"
+  else:
+    return "rhel4"
+
 def getResourcesInfo(modelb, request):
 	resList = list()
 	baseurl = request['URL']
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/09/29 21:41:43	1.20
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/10/04 16:05:43	1.21
@@ -686,7 +686,7 @@
 
 	return minidom.parseString(batch).firstChild
 
-def createClusterBatch(cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
+def createClusterBatch(os_str, cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
     batch = '<?xml version="1.0" ?>'
     batch += '<batch>'
     batch += '<module name="rpm">'
@@ -724,8 +724,14 @@
     batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
 
     batch += '<clusternodes>'
+    x = 0
     for i in nodeList:
-        batch += '<clusternode name="' + i + '" votes="1" />'
+        if os_str == "rhel4":
+          batch += '<clusternode name="' + i + '" votes="1" />'
+        else:
+          batch += '<clusternode name="' + i + '" votes="1" nodeid="' + x + '" />'
+        x = x + 1
+
     batch += '</clusternodes>'
 
     if len(nodeList) == 2:



^ permalink raw reply	[flat|nested] 185+ messages in thread
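
[Editor's note] Two Python details hide in the clusternode loop above: x is an int, so '... nodeid="' + x + '"' raises TypeError (str(x) or %-formatting is needed), and the counter starts at 0 even though cman nodeid values are conventionally positive. A sketch of the loop with both points addressed; the 1-based start is an assumption here, not the committed behavior:

def clusternode_xml(os_str, node_list):
    parts = ['<clusternodes>']
    for x, name in enumerate(node_list, 1):
        if os_str == 'rhel4':
            parts.append('<clusternode name="%s" votes="1"/>' % name)
        else:
            parts.append('<clusternode name="%s" votes="1" nodeid="%d"/>'
                         % (name, x))
    parts.append('</clusternodes>')
    return ''.join(parts)

print(clusternode_xml('rhel5', ['n1.example.com', 'n2.example.com']))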

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-04 15:11 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-10-04 15:11 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-10-04 15:11:10

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py FenceDevice.py 
	                           FenceHandler.py 

Log message:
	labelling changes, fence code

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.80&r2=1.81
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceDevice.py.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceHandler.py.diff?cvsroot=cluster&r1=1.1&r2=1.2

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 22:30:09	1.80
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/04 15:11:10	1.81
@@ -15,6 +15,7 @@
 from Netfs import Netfs
 from Script import Script
 from Samba import Samba
+from GeneralError import GeneralError
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -583,7 +584,7 @@
     cldata['currentItem'] = False
 
   cladd = {}
-  cladd['Title'] = "Create"
+  cladd['Title'] = "Create a New Cluster"
   cladd['cfg_type'] = "clusteradd"
   cladd['absolute_url'] = url + "?pagetype=" + CLUSTER_ADD
   cladd['Description'] = "Create a Cluster"
@@ -2015,6 +2016,91 @@
 
   return resultlist
 
+def getFenceInfo(self, model, request=None):
+  map = list() 
+  level1 = list()
+  level2 = list()
+  map.append(level1)
+  map.append(level2)
+  nodename = ""
+  if request == None:  #this is being called by the fence device page
+    pass
+  else:
+    try:
+      nodename = request['nodename']
+    except KeyError, e:
+      raise GeneralError('FATAL', "Could not extract nodename from request")
+    
+    #here we need to get fences for a node - just the first two levels
+    #then fill in two data structures with all attr's 
+    try:
+      node = model.retrieveNodeByName(nodename)
+    except GeneralError, e:
+      raise GeneralError('FATAL', "Couldn't find node name in current node list")
+
+    levels = node.getFenceLevels()
+    len_levels = len(levels)
+
+    if len_levels == 0:
+      return map
+
+    for i in xrange(2):
+      fence_struct = {}
+      if levels[i] != None:
+        level = levels[i]
+      else:
+        #No more levels...
+        return map
+      kids = level.getChildren()
+      if len(kids) == 0:
+        continue
+      else:
+        #for each kid, 
+        ### resolve name, find fence device
+        ### Add fd to list, if it is not there yet 
+        ### determine if it is a shared fence type
+        ### if it is a shared device, add instance entry
+        fds = model.getFenceDevices()
+        fence_struct = None
+        for kid in kids:
+          name = kid.getName()
+          found_fd = False
+          for entry in map[i]:
+            if entry['name'] == name:
+              fence_struct = entry
+              found_fd = True
+              break
+          if found_fd == False:
+            for fd in fds:
+              if fd.getName() == name:  #Found the fence device
+                fence_struct = {}
+                fence_struct['isShareable'] = fd.isShared()
+                fd_attrs = fd.getAttributes()
+                kees = fd_attrs.keys()
+                for kee in kees:
+                  fence_struct[kee] = fd_attrs[kee]
+          fi_attrs = kid.getAttributes()
+          kees = fi_attrs.keys()
+          if fence_struct['isShareable'] == True:
+            instance_struct = {}
+            for kee in kees:
+              instance_struct[kee] = fi_attrs[kee]
+              try:
+                  check = fence_struct['instances']
+                  check.append(instance_struct)
+              except KeyError, e:
+                  fence_struct['instances'] = list()
+                  fence_struct['instances'].append(instance_struct) 
+          else:  #Not a shareable fence device type
+            for kee in kees:
+              fence_struct[kee] = fi_attrs[kee]
+      map[i].append(fence_struct)      
+
+    return map    
+      
+
+      
+
 def getLogsForNode(self, request):
   nodename = request['nodename']
   clustername = request['clustername']
--- conga/luci/site/luci/Extensions/FenceDevice.py	2006/05/30 20:17:21	1.1
+++ conga/luci/site/luci/Extensions/FenceDevice.py	2006/10/04 15:11:10	1.2
@@ -14,10 +14,28 @@
     self.fd_attrs = FenceHandler.FENCE_FD_ATTRS
     self.pretty_fence_names = FenceHandler.FENCE_OPTS
     self.pretty_name_attrs = FenceHandler.PRETTY_NAME_ATTRS
+    self.shared_fences = FenceHandler.FENCE_SHARED
+
+
 
   def getAgentType(self):
     return self.attr_hash["agent"]
 
+  def isShared(self):
+    agent = self.getAgentType()
+    if agent == "fence_drac": #2 variants of drac...
+      mname = self.getAttribute("modulename")
+      if mname == None or mname == "":
+        return False
+      else:
+        return True
+
+    try:
+      return self.shared_fences[agent]
+    except KeyError, e:
+      return False
+    
+
   def getProperties(self):
     stringbuf = ""
     agent_type = self.getAgentType()
--- conga/luci/site/luci/Extensions/FenceHandler.py	2006/05/30 20:17:21	1.1
+++ conga/luci/site/luci/Extensions/FenceHandler.py	2006/10/04 15:11:10	1.2
@@ -44,14 +44,32 @@
               "fence_vixel":"Vixel SAN Switch",
               "fence_gnbd":"Global Network Block Device",
               "fence_ilo":"HP ILO Device",
+              "fence_rsa":"IBM RSA II Device",
               "fence_sanbox2":"QLogic SANBox2",
               "fence_bladecenter":"IBM Blade Center",
               "fence_mcdata":"McDATA SAN Switch",
               "fence_egenera":"Egenera SAN Controller",
               "fence_bullpap":"Bull PAP",
+              "fence_drac":"DRAC",
               "fence_ipmilan":"IPMI Lan",
               "fence_manual":"Manual Fencing" }
 
+FENCE_SHARED = {"fence_apc":True,
+              "fence_wti":True,
+              "fence_brocade":True,
+              "fence_vixel":True,
+              "fence_gnbd":True,
+              "fence_ilo":False,
+              "fence_rsa":False,
+              "fence_sanbox2":True,
+              "fence_bladecenter":True,
+              "fence_mcdata":True,
+              "fence_egenera":True,
+              "fence_bullpap":True,
+              "fence_drac":False,
+              "fence_ipmilan":False,
+              "fence_manual":False }
+
 FENCE_FD_ATTRS = {"fence_apc":["name","ipaddr","login","passwd"],
               "fence_wti":["name","ipaddr","passwd"],
               "fence_brocade":["name","ipaddr","login","passwd"],



^ permalink raw reply	[flat|nested] 185+ messages in thread
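
[Editor's note] The isShared() logic added above is table-driven classification with one special case: fence_drac is shared only in its modular variant, detected by a non-empty "modulename" attribute. A condensed sketch, with attrs a plain dict standing in for the device's attributes and only a subset of the FENCE_SHARED table shown:

FENCE_SHARED = {'fence_apc': True, 'fence_ilo': False, 'fence_drac': False}

def is_shared(agent, attrs):
    if agent == 'fence_drac':
        # DRAC/MC is shared; plain DRAC is not
        return bool(attrs.get('modulename'))
    return FENCE_SHARED.get(agent, False)   # unknown agents: not shared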

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-02 22:30 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-02 22:30 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-02 22:30:09

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	more validation and improved fault tolerance code

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.79&r2=1.80

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 21:42:49	1.79
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 22:30:09	1.80
@@ -2185,7 +2185,10 @@
 	modelb = request.SESSION.get('model')
 	form = request.form
 
-	if 'edit' in form and form['edit'] == 'True':
+	if not modelb or not form:
+		return None
+
+	if form.has_key('edit'):
 		try:
 			oldname = form['oldname'].strip()
 			if not oldname:
@@ -2196,153 +2199,387 @@
 	else:
 		res = apply(Ip)
 
+	if not res:
+		return None
+
 	try:
 		addr = form['ip_address'].strip()
 		if not addr:
 			raise KeyError('ip_address is blank')
+		# XXX: validate IP addr
 		res.attr_hash['address'] = addr
 	except KeyError, e:
 		return None
 
-	try:
-		monitor = form['monitorLink'].strip()
-		if monitor == '':
-			raise KeyError('monitorLink is blank.')
-	except KeyError, e:
-		return None
-
-	if monitor == '1' or monitor == 'True':
+	if 'monitorLink' in form:
 		res.attr_hash['monitor_link'] = '1'
 	else:
 		res.attr_hash['monitor_link'] = '0'
+
 	modelb.getResourcesPtr().addChild(res)
 	return res
 
 def addFs(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(Fs)
-  form = request.form
-  res.attr_hash["name"] = form["resourceName"]
-  res.attr_hash["mountpoint"] = form["mountpoint"]
-  res.attr_hash["device"] = form["device"]
-  res.attr_hash["options"] = form["options"]
-  res.attr_hash["fstype"] = form["fstype"]
-  res.attr_hash["fsid"] = form["fsid"]
-  if form.has_key('forceunmount'):
-    res.attr_hash["force_unmount"] = '1'
-  else:
-    res.attr_hash["force_unmount"] = '0'
-
-  if form.has_key('selffence'):
-    res.attr_hash["self_fence"] = '1'
-  else:
-    res.attr_hash["self_fence"] = '0'
-
-  if form.has_key('checkfs'):
-    res.attr_hash["force_fsck"] = '1'
-  else:
-    res.attr_hash["force_fsck"] = '0'
+	modelb = request.SESSION.get('model')
+	form = request.form
 
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	if not modelb or not form:
+		return None
+
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(Fs)
+
+	if not res:
+		return None
+
+	# XXX: sanity check these fields
+	try:
+		name = form['resourceName'].strip()
+		res.attr_hash['name'] = name
+	except:
+		return None
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		res.attr_hash['mountpoint'] = mountpoint
+	except:
+		return None
+
+	try:
+		device = form['device'].strip()
+		res.attr_hash['device'] = device
+	except:
+		return None
+
+	try:
+		options = form['options'].strip()
+		res.attr_hash['options'] = options
+	except:
+		return None
+
+	try:
+		fstype = form['fstype'].strip()
+		res.attr_hash['fstype'] = fstype
+	except:
+		return None
+
+	try:
+		fsid = form['fsid'].strip()
+		res.attr_hash['fsid'] = fsid
+	except:
+		return None
+
+	if form.has_key('forceunmount'):
+		res.attr_hash['force_unmount'] = '1'
+	else:
+		res.attr_hash['force_unmount'] = '0'
+
+	if form.has_key('selffence'):
+		res.attr_hash['self_fence'] = '1'
+	else:
+		res.attr_hash['self_fence'] = '0'
+
+	if form.has_key('checkfs'):
+		res.attr_hash['force_fsck'] = '1'
+	else:
+		res.attr_hash['force_fsck'] = '0'
+
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 def addGfs(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(Clusterfs)
-  form = request.form
-  res.attr_hash["name"] = form["resourceName"]
-  res.attr_hash["mountpoint"] = form["mountpoint"]
-  res.attr_hash["device"] = form["device"]
-  res.attr_hash["options"] = form["options"]
-  res.attr_hash["fsid"] = form["fsid"]
-
-  if form.has_key('forceunmount'):
-    res.attr_hash["force_unmount"] = '1'
-  else:
-    res.attr_hash["force_unmount"] = '0'
+	form = request.form
 
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	modelb = request.SESSION.get('model')
+	if not modelb:
+		return None
+
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(Clusterfs)
+
+	if not res:
+		return None
+
+	# XXX: sanity check these fields
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise
+		res.attr_hash['name'] = name
+	except:
+		return None
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		res.attr_hash['mountpoint'] = mountpoint
+	except:
+		return None
+
+	try:
+		device = form['device'].strip()
+		res.attr_hash['device'] = device
+	except:
+		return None
+
+	try:
+		options = form['options'].strip()
+		res.attr_hash['options'] = options
+	except:
+		return None
+
+	try:
+		fsid = form['fsid'].strip()
+		res.attr_hash['fsid'] = fsid
+	except:
+		return None
+
+	if form.has_key('forceunmount'):
+		res.attr_hash['force_unmount'] = '1'
+	else:
+		res.attr_hash['force_unmount'] = '0'
+
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 def addNfsm(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(Netfs)
-  form = request.form
-  res.attr_hash["name"] = form["resourceName"]
-  res.attr_hash["mountpoint"] = form["mountpoint"]
-  res.attr_hash["host"] = form["host"]
-  res.attr_hash["options"] = form["options"]
-  res.attr_hash["exportpath"] = form["export"]
-  res.attr_hash["nfstype"] = form["fstype"]
-
-  if form.has_key('forceunmount'):
-    res.attr_hash["force_unmount"] = '1'
-  else:
-    res.attr_hash["force_unmount"] = '0'
+	form = request.form
+	modelb = request.SESSION.get('model')
 
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	if not form or not modelb:
+		return None
+
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(Netfs)
+
+	if not res:
+		return None
+
+	# XXX: sanity check these fields
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise
+		res.attr_hash['name'] = name
+	except:
+		return None
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		res.attr_hash['mountpoint'] = mountpoint
+	except:
+		return None
+
+	try:
+		host = form['host'].strip()
+		res.attr_hash['host'] = host
+	except:
+		return None
+
+	try:
+		options = form['options'].strip()
+		res.attr_hash['options'] = options
+	except:
+		return None
+
+	try:
+		exportpath = form['exportpath'].strip()
+		res.attr_hash['exportpath'] = exportpath 
+	except:
+		return None
+
+	try:
+		nfstype = form['nfstype'].strip().lower()
+		if nfstype != 'nfs' and nfstype != 'nfs4':
+			raise
+		res.attr_hash['nfstype'] = nfstype
+	except:
+		return None
+
+	if form.has_key('forceunmount'):
+		res.attr_hash['force_unmount'] = '1'
+	else:
+		res.attr_hash['force_unmount'] = '0'
+
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 def addNfsc(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(NFSClient)
-  form = request.form
-  res.attr_hash["name"] = form["resourceName"]
-  res.attr_hash["target"] = form["target"]
-  res.attr_hash["options"] = form["options"]
+	form = request.form
+	modelb = request.SESSION.get('model')
 
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	if not form or not modelb:
+		return None
+
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(NFSClient)
+
+	if not res:
+		return None
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise
+		res.attr_hash['name'] = name
+	except:
+		return None
+
+	try:
+		target = form['target'].strip()
+		res.attr_hash['target'] = target 
+	except:
+		return None
+
+	try:
+		options = form['options'].strip()
+		res.attr_hash['options'] = options
+	except:
+		return None
+
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 def addNfsx(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(NFSExport)
-  form = request.form
-  res.attr_hash["name"] = form["resourceName"]
+	modelb = request.SESSION.get('model')
+	form = request.form
 
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	if not modelb or not form:
+		return None
+
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(NFSExport)
+
+	if not res:
+		return None
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise
+		res.attr_hash['name'] = name
+	except:
+		return None
+
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 def addScr(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(Script)
-  form = request.form
-  res.attr_hash["name"] = form["resourceName"]
-  res.attr_hash["file"] = form["file"]
+	modelb = request.SESSION.get('model')
+	form = request.form
 
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	if not modelb or not form:
+		return None
+
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(Script)
+
+	if not res:
+		return None
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise
+		res.attr_hash['name'] = name
+	except:
+		return None
+
+	try:
+		file = form['file'].strip()
+		if not file:
+			raise
+		res.attr_hash['file'] = file
+	except:
+		return None
+
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 def addSmb(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(Samba)
-  form = request.form
-  res.attr_hash["name"] = form["resourceName"]
-  res.attr_hash["workgroup"] = form["workgroup"]
+	modelb = request.SESSION.get('model')
+	form = request.form
 
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	if not modelb or not form:
+		return None
+
+	if form.has_key('edit'):
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(Samba)
+
+	if not res:
+		return None
+
+	try:
+		name = form['resourceName'].strip()
+		if not name:
+			raise
+		res.attr_hash['name'] = name
+	except:
+		return None
+
+	try:
+		workgroup = form['workgroup'].strip()
+		res.attr_hash['workgroup'] = workgroup
+	except:
+		return None
+
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 resourceAddHandler = {
 	'ip': addIp,



^ permalink raw reply	[flat|nested] 185+ messages in thread
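
[Editor's note] The bulk of the rewrite above converts each add* handler to the same defensive shape: fetch model and form, handle the 'edit' case via getResourceForEdit, then strip each form field inside its own try block and return None on any miss. The per-field boilerplate could be captured by a helper along these lines (hypothetical; not in the committed code, which also renames some keys, e.g. resourceName to name):

def copy_fields(form, res, fields):
    for f in fields:
        try:
            res.attr_hash[f] = form[f].strip()
        except (KeyError, AttributeError):
            return None   # field missing or not a string
    return res

# e.g. for addNfsc: copy_fields(form, res, ['name', 'target', 'options'])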

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-02 21:42 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-02 21:42 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-02 21:42:49

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	more cleanup and robustness improvements

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.78&r2=1.79

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 21:09:27	1.78
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 21:42:49	1.79
@@ -2065,27 +2065,46 @@
   return map
 
 def getClusterOS(self, ragent, request):
-  clustername = request['clustername']
-  ricci_agent = resolve_nodename(self, clustername, ragent)
-  rc = RicciCommunicator(ricci_agent)
-  map = {}
-  map['os'] = rc.os()
-  map['isVirtualized'] = rc.dom0()
-  return map
+	try:
+		clustername = request['clustername']
+	except KeyError, e:
+		try:
+			clustername = request.form['clustername']
+		except:
+			return {}
+	except:
+		return {}
+
+	ricci_agent = resolve_nodename(self, clustername, ragent)
+	rc = RicciCommunicator(ricci_agent)
+	map = {}
+	map['os'] = rc.os()
+	map['isVirtualized'] = rc.dom0()
+	return map
 
 def getResourcesInfo(modelb, request):
-  resList = list()
-  baseurl = request['URL']
-  cluname = request['clustername']
-  for item in modelb.getResources():
-    itemmap = {}
-    itemmap['name'] = item.getName()
-    itemmap['type'] = item.resource_type
-    itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_CONFIG
-    itemmap['url'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE
-    itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_REMOVE
-    resList.append(itemmap)
-  return resList
+	resList = list()
+	baseurl = request['URL']
+
+	try:
+		cluname = request['clustername']
+	except KeyError, e:
+		try:
+			cluname = request.form['clustername']
+		except:
+			return resList
+	except:
+		return resList
+
+	for item in modelb.getResources():
+		itemmap = {}
+		itemmap['name'] = item.getName()
+		itemmap['type'] = item.resource_type
+		itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_CONFIG
+		itemmap['url'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE
+		itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_REMOVE
+		resList.append(itemmap)
+	return resList
 
 def getResourceInfo(modelb, request):
 	try:
@@ -2095,6 +2114,8 @@
 			name = request.form['resourcename']
 		except:
 			return {}
+	except:
+		return {}
 
 	try:
 		cluname = request['clustername']
@@ -2103,10 +2124,12 @@
 			cluname = request.form['clustername']
 		except:
 			return {}
+	except:
+		return {}
 
 	try:
 		baseurl = request['URL']
-	except KeyError, e:
+	except:
 		return {}
 
 	for res in modelb.getResources():
@@ -2158,82 +2181,42 @@
   response = request.RESPONSE
   response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
+def addIp(request):
+	modelb = request.SESSION.get('model')
+	form = request.form
 
+	if 'edit' in form and form['edit'] == 'True':
+		try:
+			oldname = form['oldname'].strip()
+			if not oldname:
+				raise KeyError('oldname is blank.')
+			res = getResourceForEdit(modelb, oldname)
+		except KeyError, e:
+			return None
+	else:
+		res = apply(Ip)
 
-def addResource(self, request, ragent):
-  if not request.form:
-    return "Nothing submitted, no changes made."
-
-  if request.form['type'] != 'ip' and  not request.form['resourceName']:
-    return "Please enter a name for the resource."
-  types = {'ip': addIp,
-           'fs': addFs,
-           'gfs': addGfs,
-           'nfsm': addNfsm,
-           'nfsx': addNfsx,
-           'nfsc': addNfsx,
-           'scr': addScr,
-           'smb': addSmb}
-
-
-  type = request.form["type"]
-  res = types[type](request)
-  modelb = request.SESSION.get('model')
-  modelstr = ""
-  conf = modelb.exportModelAsString()
-  rb = ricci_bridge(ragent)
-  #try:
-  if True:
-    batch_number, result = rb.setClusterConf(str(conf))
-  #except:
-  else:
-    return "Some error occured in setClusterConf\n"
-
-  clustername = request['clustername']
-  path = CLUSTER_FOLDER_PATH + clustername
-  clusterfolder = self.restrictedTraverse(path)
-  batch_id = str(batch_number)
-  objname = ragent + "____flag"
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
-   #Now we need to annotate the new DB object
-  objpath = path + "/" + objname
-  flag = self.restrictedTraverse(objpath)
-  flag.manage_addProperty(BATCH_ID,batch_id, "string")
-  flag.manage_addProperty(TASKTYPE,RESOURCE_ADD, "string")
-  if type != 'ip':
-	  flag.manage_addProperty(FLAG_DESC,"Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
-  else:
-	  flag.manage_addProperty(FLAG_DESC,"Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
-  response = request.RESPONSE
-  response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
-
-
-def getResourceForEdit(modelb, name):
-  resPtr = modelb.getResourcesPtr()
-  resources = resPtr.getChildren()
-
-  for res in resources:
-    if res.getName() == name:
-      resPtr.removeChild(res)
-      break
+	try:
+		addr = form['ip_address'].strip()
+		if not addr:
+			raise KeyError('ip_address is blank')
+		res.attr_hash['address'] = addr
+	except KeyError, e:
+		return None
 
-  return res
+	try:
+		monitor = form['monitorLink'].strip()
+		if monitor == '':
+			raise KeyError('monitorLink is blank.')
+	except KeyError, e:
+		return None
 
-def addIp(request):
-  modelb = request.SESSION.get('model')
-  if request.form.has_key('edit'):
-    res = getResourceForEdit(modelb, request.form['oldname'])
-  else:
-    res = apply(Ip)
-  form = request.form
-  addr = form['ip_address']
-  res.attr_hash["address"] = addr
-  if form.has_key('monitorLink'):
-    res.attr_hash["monitor_link"] = '1'
-  else:
-    res.attr_hash["monitor_link"] = '0'
-  modelb.getResourcesPtr().addChild(res)
-  return res
+	if monitor == '1' or monitor == 'True':
+		res.attr_hash['monitor_link'] = '1'
+	else:
+		res.attr_hash['monitor_link'] = '0'
+	modelb.getResourcesPtr().addChild(res)
+	return res
 
 def addFs(request):
   modelb = request.SESSION.get('model')
@@ -2361,6 +2344,77 @@
   modelb.getResourcesPtr().addChild(res)
   return res
 
+resourceAddHandler = {
+	'ip': addIp,
+	'fs': addFs,
+	'gfs': addGfs,
+	'nfsm': addNfsm,
+	'nfsx': addNfsx,
+	'nfsc': addNfsx,
+	'scr': addScr,
+	'smb': addSmb
+}
+
+def addResource(self, request, ragent):
+	if not request.form:
+		return (False, {'errors': 'No form was submitted.'})
+
+	try:
+		type = request.form['type'].strip()
+		if not type or not type in resourceAddHandler:
+			raise
+	except:
+		return (False, {'errors': 'Form type is missing or invalid.'})
+
+	try:
+		resname = request.form['resourceName']
+	except KeyError, e:
+		# For IP, the IP address itself is the name.
+		if request.form['type'] != 'ip':
+			return (False, {'errors': 'No resource name was given.'})
+
+	res = resourceAddHandler[type](request)
+	modelb = request.SESSION.get('model')
+	modelstr = ""
+	conf = modelb.exportModelAsString()
+	rb = ricci_bridge(ragent)
+	#try:
+	if True:
+		batch_number, result = rb.setClusterConf(str(conf))
+	#except:
+	else:
+		return "Some error occured in setClusterConf\n"
+
+	clustername = request['clustername']
+	path = CLUSTER_FOLDER_PATH + clustername
+	clusterfolder = self.restrictedTraverse(path)
+	batch_id = str(batch_number)
+	objname = ragent + "____flag"
+	clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+	#Now we need to annotate the new DB object
+	objpath = path + "/" + objname
+	flag = self.restrictedTraverse(objpath)
+	flag.manage_addProperty(BATCH_ID,batch_id, "string")
+	flag.manage_addProperty(TASKTYPE,RESOURCE_ADD, "string")
+	if type != 'ip':
+		flag.manage_addProperty(FLAG_DESC,"Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
+	else:
+		flag.manage_addProperty(FLAG_DESC,"Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+	response = request.RESPONSE
+	response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
+
+def getResourceForEdit(modelb, name):
+  resPtr = modelb.getResourcesPtr()
+  resources = resPtr.getChildren()
+
+  for res in resources:
+    if res.getName() == name:
+      resPtr.removeChild(res)
+      break
+
+  return res
+
+
 def appendModel(request, model):
 	try:
 		request.SESSION.set('model', model)


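Most of the robustness work in this revision is one repeated idiom: look a variable up on the request object (GET/URL variables), fall back to request.form (POST data), and return a safe default when both fail. A sketch of that lookup as a helper (the helper name is illustrative; the patch deliberately keeps the lookups inline):

    def get_request_var(request, name, default=None):
        # Try URL/GET variables first, then POSTed form data.
        try:
            return request[name]
        except KeyError:
            try:
                return request.form[name]
            except (KeyError, AttributeError):
                return default

getClusterOS, getResourcesInfo, and getResourceInfo above are all this pattern plus an early return of {} or an empty list when the variable cannot be found.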

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-02 21:09 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-02 21:09 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-02 21:09:28

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix typo

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.77&r2=1.78

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 20:53:37	1.77
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 21:09:27	1.78
@@ -2091,20 +2091,22 @@
 	try:
 		name = request['resourcename']
 	except KeyError, e:
-		name = request.form['resourcename']
-	except:
-		return {}
+		try:
+			name = request.form['resourcename']
+		except:
+			return {}
 
 	try:
 		cluname = request['clustername']
 	except KeyError, e:
-		cluname = request.form['clustername']
-	except:
-		return {}
+		try:
+			cluname = request.form['clustername']
+		except:
+			return {}
 
 	try:
 		baseurl = request['URL']
-	except:
+	except KeyError, e:
 		return {}
 
 	for res in modelb.getResources():


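The change looks cosmetic but is not: on a single try statement, a later except clause cannot catch an exception raised inside an earlier except handler, so if request.form['resourcename'] itself raised, the original code let that propagate. A reduced illustration of both shapes:

    def lookup_broken(request):
        # The bare except never sees failures of the KeyError handler.
        try:
            name = request['resourcename']
        except KeyError:
            name = request.form['resourcename']   # may raise, uncaught
        except:
            name = None                           # outer lookup errors only
        return name

    def lookup_fixed(request):
        # As in this patch: the fallback gets its own try block.
        try:
            name = request['resourcename']
        except KeyError:
            try:
                name = request.form['resourcename']
            except:
                name = None
        return name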

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-10-02 20:53 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-10-02 20:53 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-02 20:53:37

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix some of the resource backend code to accept POST as well as GET

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.76&r2=1.77

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/28 22:04:27	1.76
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 20:53:37	1.77
@@ -21,7 +21,7 @@
 #folder, then only the admin user may see this menu, and
 #the configure option should not be displayed.
 #2)If there are clusters in the ManagedClusterSystems,
-#then only display chooser if the current user has 
+#then only display chooser if the current user has
 #permissions on at least one. If the user is admin, show ALL clusters
 
 from homebase_adapters import nodeAuth, nodeUnauth, manageCluster, createClusterSystems
@@ -187,11 +187,11 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,CLUSTER_ADD, "string")
     flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
-  
+
 
 def validateAddClusterNode(self, request):
 	errors = list()
-	messages = list() 
+	messages = list()
 	requestResults = {}
 
 	try:
@@ -542,7 +542,7 @@
 
 def createCluChooser(self, request, systems):
   dummynode = {}
-  
+
   if request.REQUEST_METHOD == 'POST':
     ret = validatePost(self, request)
     try:
@@ -553,7 +553,7 @@
     try: request.SESSION.set('checkRet', {})
     except: pass
 
-  #First, see if a cluster is chosen, then 
+  #First, see if a cluster is chosen, then
   #check that the current user can access that system
   cname = None
   try:
@@ -581,7 +581,7 @@
     cldata['currentItem'] = True
   else:
     cldata['currentItem'] = False
-  
+
   cladd = {}
   cladd['Title'] = "Create"
   cladd['cfg_type'] = "clusteradd"
@@ -591,7 +591,7 @@
     cladd['currentItem'] = True
   else:
     cladd['currentItem'] = False
-  
+
   clcfg = {}
   clcfg['Title'] = "Configure"
   clcfg['cfg_type'] = "clustercfg"
@@ -609,8 +609,8 @@
     clcfg['show_children'] = True
   else:
     clcfg['show_children'] = False
-  
-  #loop through all clusters 
+
+  #loop through all clusters
   syslist= list()
   for system in systems:
     clsys = {}
@@ -629,7 +629,7 @@
     syslist.append(clsys)
 
   clcfg['children'] = syslist
-  
+
   mylist = list()
   mylist.append(cldata)
   mylist.append(cladd)
@@ -674,7 +674,7 @@
     nd['currentItem'] = True
   else:
     nd['currentItem'] = False
-    
+
 
   ndadd = {}
   ndadd['Title'] = "Add a Node"
@@ -685,7 +685,7 @@
     ndadd['currentItem'] = True
   else:
     ndadd['currentItem'] = False
-  
+
   ndcfg = {}
   ndcfg['Title'] = "Configure"
   ndcfg['cfg_type'] = "nodecfg"
@@ -699,11 +699,11 @@
     ndcfg['currentItem'] = True
   else:
     ndcfg['currentItem'] = False
-  
+
   nodes = model.getNodes()
   nodenames = list()
   for node in nodes:
-    nodenames.append(node.getName()) 
+    nodenames.append(node.getName())
 
   cfgablenodes = list()
   for nodename in nodenames:
@@ -723,9 +723,9 @@
         cfg['currentItem'] = False
     else:
       cfg['currentItem'] = False
-      
+
     cfgablenodes.append(cfg)
-  
+
   #Now add nodename structs as children of the config element
   ndcfg['children'] = cfgablenodes
 
@@ -749,7 +749,7 @@
     sv['currentItem'] = True
   else:
     sv['currentItem'] = False
-    
+
   svadd = {}
   svadd['Title'] = "Add a Service"
   svadd['cfg_type'] = "serviceadd"
@@ -759,7 +759,7 @@
     svadd['currentItem'] = True
   else:
     svadd['currentItem'] = False
-    
+
   svcfg = {}
   svcfg['Title'] = "Configure a Service"
   svcfg['cfg_type'] = "servicecfg"
@@ -794,16 +794,16 @@
         svc['currentItem'] = False
     else:
       svc['currentItem'] = False
-      
+
     serviceable.append(svc)
-  svcfg['children'] = serviceable  
+  svcfg['children'] = serviceable
+
 
 
-  
   kids = list()
   kids.append(svadd)
   kids.append(svcfg)
-  sv['children'] = kids  
+  sv['children'] = kids
 #############################################################
   rv = {}
   rv['Title'] = "Resources"
@@ -818,7 +818,7 @@
     rv['currentItem'] = True
   else:
     rv['currentItem'] = False
-    
+
   rvadd = {}
   rvadd['Title'] = "Add a Resource"
   rvadd['cfg_type'] = "resourceadd"
@@ -828,7 +828,7 @@
     rvadd['currentItem'] = True
   else:
     rvadd['currentItem'] = False
-    
+
   rvcfg = {}
   rvcfg['Title'] = "Configure a Resource"
   rvcfg['cfg_type'] = "resourcecfg"
@@ -863,17 +863,17 @@
         rvc['currentItem'] = False
     else:
       rvc['currentItem'] = False
-      
+
     resourceable.append(rvc)
-  rvcfg['children'] = resourceable  
+  rvcfg['children'] = resourceable
+
 
 
-  
   kids = list()
   kids.append(rvadd)
   kids.append(rvcfg)
-  rv['children'] = kids  
- ################################################################# 
+  rv['children'] = kids
+ #################################################################
   fd = {}
   fd['Title'] = "Failover Domains"
   fd['cfg_type'] = "failoverdomains"
@@ -887,7 +887,7 @@
     fd['currentItem'] = True
   else:
     fd['currentItem'] = False
-    
+
   fdadd = {}
   fdadd['Title'] = "Add a Failover Domain"
   fdadd['cfg_type'] = "failoverdomainadd"
@@ -897,7 +897,7 @@
     fdadd['currentItem'] = True
   else:
     fdadd['currentItem'] = False
-    
+
   fdcfg = {}
   fdcfg['Title'] = "Configure a Failover Domain"
   fdcfg['cfg_type'] = "failoverdomaincfg"
@@ -932,16 +932,16 @@
         fdc['currentItem'] = False
     else:
       fdc['currentItem'] = False
-      
+
     fdomable.append(fdc)
-  fdcfg['children'] = fdomable  
+  fdcfg['children'] = fdomable
+
 
 
-  
   kids = list()
   kids.append(fdadd)
   kids.append(fdcfg)
-  fd['children'] = kids  
+  fd['children'] = kids
 #############################################################
   fen = {}
   fen['Title'] = "Fence Devices"
@@ -956,7 +956,7 @@
     fen['currentItem'] = True
   else:
     fen['currentItem'] = False
-    
+
   fenadd = {}
   fenadd['Title'] = "Add a Fence Device"
   fenadd['cfg_type'] = "fencedeviceadd"
@@ -966,7 +966,7 @@
     fenadd['currentItem'] = True
   else:
     fenadd['currentItem'] = False
-    
+
   fencfg = {}
   fencfg['Title'] = "Configure a Fence Device"
   fencfg['cfg_type'] = "fencedevicecfg"
@@ -1001,16 +1001,16 @@
         fenc['currentItem'] = False
     else:
       fenc['currentItem'] = False
-      
+
     fenceable.append(fenc)
-  fencfg['children'] = fenceable  
+  fencfg['children'] = fenceable
+
 
 
-  
   kids = list()
   kids.append(fenadd)
   kids.append(fencfg)
-  fen['children'] = kids  
+  fen['children'] = kids
 #############################################################
 
   mylist = list()
@@ -1026,10 +1026,10 @@
 
 
 def getClusterName(self, model):
-  return model.getClusterName() 
+  return model.getClusterName()
 
 def getClusterAlias(self, model):
-  alias = model.getClusterAlias() 
+  alias = model.getClusterAlias()
   if alias == None:
     return model.getClusterName()
   else:
@@ -1060,16 +1060,16 @@
     base2 = req['HTTP_HOST'] + req['SERVER_PORT']
 
   htab = { 'Title':"homebase",
-           'Description':"Home base for this luci server", 
+           'Description':"Home base for this luci server",
            'Taburl':"/luci/homebase"}
   if selectedtab == "homebase":
     htab['isSelected'] = True
   else:
     htab['isSelected'] = False
-      
+
 
   ctab = { 'Title':"cluster",
-           'Description':"Cluster configuration page", 
+           'Description':"Cluster configuration page",
            'Taburl':"/luci/cluster?pagetype=3"}
   if selectedtab == "cluster":
     ctab['isSelected'] = True
@@ -1077,16 +1077,16 @@
     ctab['isSelected'] = False
 
   stab = { 'Title':"storage",
-           'Description':"Storage configuration page", 
+           'Description':"Storage configuration page",
            'Taburl':"/luci/storage"}
   if selectedtab == "storage":
     stab['isSelected'] = True
   else:
     stab['isSelected'] = False
 
-  portaltabs.append(htab) 
-  portaltabs.append(ctab) 
-  portaltabs.append(stab) 
+  portaltabs.append(htab)
+  portaltabs.append(ctab)
+  portaltabs.append(stab)
 
   return portaltabs
 
@@ -1123,7 +1123,7 @@
     return None
   else:
     return None
-  
+
 def getRicciAgent(self, clustername):
   #Check cluster permission here! return none if false
   path = CLUSTER_FOLDER_PATH + clustername[0]
@@ -1137,7 +1137,7 @@
     return ""
   else:
     return ""
-  
+
 
 def getClusterStatus(self, ricci_name):
   rb = ricci_bridge(ricci_name)
@@ -1177,8 +1177,8 @@
       vals['failed'] = node.getAttribute('failed')
       vals['autostart'] = node.getAttribute('autostart')
       results.append(vals)
-    
-  return results  
+
+  return results
 
 def getServicesInfo(self, status, modelb, req):
   map = {}
@@ -1190,7 +1190,7 @@
       itemmap = {}
       itemmap['name'] = item['name']
       if item['running'] == "true":
-        itemmap['running'] = "true" 
+        itemmap['running'] = "true"
         itemmap['nodename'] = item['nodename']
       itemmap['autostart'] = item['autostart']
       itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE
@@ -1206,7 +1206,7 @@
   map['services'] = maplist
 
   return map
-    
+
 def getServiceInfo(self,status,modelb,req):
   #set up struct for service config page
   baseurl = req['URL']
@@ -1269,7 +1269,7 @@
     children = svc.getChildren()
     for child in children:
       recurse_resources(root_uuid, child, resource_list, indent_ctr)
-      
+
   hmap['resource_list'] = resource_list
   return hmap
 
@@ -1289,7 +1289,7 @@
     rc_map['type'] = child.getResourceType()
 
   rc_map['indent_ctr'] = indent_ctr
-    
+
   #Note: Final version needs all resource attrs
   rc_map['attrs'] = child.getAttributes()
   rc_map['uuid'] = make_uuid('resource')
@@ -1305,7 +1305,7 @@
 
   rc_map['max_depth'] = child_depth
   return child_depth + 1
-    
+
 def serviceStart(self, ricci_agent, req):
   rb = ricci_bridge(ricci_agent)
   svcname = req['servicename']
@@ -1356,7 +1356,7 @@
   flag = self.restrictedTraverse(objpath)
   #flag[BATCH_ID] = batch_id
   #flag[TASKTYPE] = SERVICE_RESTART
-  #flag[FLAG_DESC] = "Restarting service " + svcname 
+  #flag[FLAG_DESC] = "Restarting service " + svcname
   flag.manage_addProperty(BATCH_ID,batch_id, "string")
   flag.manage_addProperty(TASKTYPE,SERVICE_RESTART, "string")
   flag.manage_addProperty(FLAG_DESC,"Restarting service " + svcname, "string")
@@ -1427,7 +1427,7 @@
       for nitem in nlist:
         if nitem['name'] == ndname:
           break
-      nodesmap['nodename'] = ndname 
+      nodesmap['nodename'] = ndname
       nodesmap['nodecfgurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + ndname + "&pagetype=" + NODE
       if nitem['clustered'] == "true":
         nodesmap['status'] = NODE_ACTIVE
@@ -1460,7 +1460,7 @@
     fdom_map['svclist'] = svclist
     fdomlist.append(fdom_map)
   return fdomlist
- 
+
 def processClusterProps(self, ricci_agent, request):
   #First, retrieve cluster.conf from session
   conf = request.SESSION.get('conf')
@@ -1502,8 +1502,8 @@
 
   else:
     return
-  
- 
+
+
 def getClusterInfo(self, model, req):
   cluname = req[CLUNAME]
   baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
@@ -1546,7 +1546,7 @@
   else:
     map['is_mcast'] = "False"
     map['mcast_addr'] = "1.2.3.4"
-    
+
   #-------------
   #quorum disk params
   quorumd_url = baseurl + ACTIONTYPE + "=" + QUORUMD
@@ -1710,7 +1710,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
-                                                                                
+
     response = request.RESPONSE
     #Is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1730,7 +1730,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
-                                                                                
+
     response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1751,7 +1751,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
-                                                                                
+
     response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1789,7 +1789,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
-                                                                                
+
     response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1799,7 +1799,7 @@
     #We need to get a node name other than the node
     #to be deleted, then delete the node from the cluster.conf
     #and propogate it. We will need two ricci agents for this task.
-    
+
     #First, delete cluster.conf from node to be deleted.
 
     #next, have node leave cluster.
@@ -1853,7 +1853,7 @@
     flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
     response = request.RESPONSE
     response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
-  
+
 
 def getNodeInfo(self, model, status, request):
   infohash = {}
@@ -1882,17 +1882,17 @@
     nodestate = NODE_ACTIVE
   else:
     nodestate = NODE_INACTIVE
-                                                                                
+
   infohash['nodestate'] = nodestate
   infohash['nodename'] = nodename
-                                                                                
+
   #set up drop down links
   if nodestate == NODE_ACTIVE:
     infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['fence_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
-                                                                                
+
   if nodestate == NODE_INACTIVE:
     infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
@@ -1909,7 +1909,7 @@
       svc_dict['servicename'] = svcname
       svc_dict['svcurl'] = svcurl
       svc_dict_list.append(svc_dict)
-                                                                                
+
   infohash['currentservices'] = svc_dict_list
 
   #next is faildoms
@@ -1921,9 +1921,9 @@
     fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName()
     fdom_dict['fdomurl'] = fdomurl
     fdom_dict_list.append(fdom_dict)
-                                                                              
+
   infohash['fdoms'] = fdom_dict_list
-                                                                                
+
   #return infohash
   infohash['d_states'] = None
   if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
@@ -1936,10 +1936,10 @@
     dlist.append("rgmanager")
     states = rb.getDaemonStates(dlist)
     infohash['d_states'] = states
-    
-  infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername                                                                            
+
+  infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername
   return infohash
-  #get list of faildoms for node  
+  #get list of faildoms for node
 
 def getNodesInfo(self, model,status,req):
   resultlist = list()
@@ -1973,14 +1973,14 @@
       map['status'] = NODE_INACTIVE
       map['status_str'] = NODE_INACTIVE_STR
 
-    map['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + name + "&clustername=" + clustername                                                                            
+    map['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + name + "&clustername=" + clustername
     #set up URLs for dropdown menu...
     if map['status'] == NODE_ACTIVE:
       map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
       map['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
       map['fence_it_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + name + "&clustername=" + clustername
       map['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + name + "&clustername=" + clustername
-                                                                                
+
     if map['status'] == NODE_INACTIVE:
       map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
       map['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
@@ -2028,7 +2028,7 @@
   cluname = req['clustername']
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
-  items = clusterfolder.objectItems('ManagedSystem') 
+  items = clusterfolder.objectItems('ManagedSystem')
   #Ok, here is what is going on...if there is an item,
   #we need to call the ricci_bridge and get a batch report.
   #This report will tell us one of three things:
@@ -2040,7 +2040,7 @@
     #Check here for more than 1 entry (an error)
     ricci = item[0].split("____") #This removes the 'flag' suffix
     rb = ricci_bridge(ricci[0])
-    finished = rb.checkBatch(item[1].getProperty(BATCH_ID)) 
+    finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
     if finished == True:
       clusterfolder.manage_delObjects(item[0])
       map['refreshurl'] = '5; url=\".\"'
@@ -2054,7 +2054,7 @@
       if dex != (-1):
         tmpstr = part2[:dex] #This strips off busyfirst var
         part2 = tmpstr
-        ###FIXME - The above assumes that the 'busyfirst' query var is at the 
+        ###FIXME - The above assumes that the 'busyfirst' query var is at the
         ###end of the URL...
       wholeurl = part1 + "?" + part2
       #map['url'] = "5, url=" + req['ACTUAL_URL'] + "?" + req['QUERY_STRING']
@@ -2086,31 +2086,45 @@
     itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_REMOVE
     resList.append(itemmap)
   return resList
-                                                                                
-def getResourceInfo(modelb, request):
-	resMap = {}
 
+def getResourceInfo(modelb, request):
 	try:
 		name = request['resourcename']
-		baseurl = request['URL']
+	except KeyError, e:
+		name = request.form['resourcename']
+	except:
+		return {}
+
+	try:
 		cluname = request['clustername']
+	except KeyError, e:
+		cluname = request.form['clustername']
+	except:
+		return {}
+
+	try:
+		baseurl = request['URL']
+	except:
+		return {}
 
-		for res in modelb.getResources():
-			if res.getName() == name:
+	for res in modelb.getResources():
+		if res.getName() == name:
+			resMap = {}
+			try:
 				resMap['name'] = res.getName()
 				resMap['type'] = res.resource_type
 				resMap['tag_name'] = res.TAG_NAME
 				resMap['attrs'] = res.attr_hash
 				resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
 				return resMap
-	except: pass
-	return {}
+			except:
+				return {}
 
 def delResource(self, request, ragent):
   modelb = request.SESSION.get('model')
   resPtr = modelb.getResourcesPtr()
   resources = resPtr.getChildren()
-  name = request['resourcename']                                                                             
+  name = request['resourcename']
   for res in resources:
     if res.getName() == name:
       resPtr.removeChild(res)
@@ -2143,11 +2157,11 @@
   response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
 
-  
+
 def addResource(self, request, ragent):
   if not request.form:
     return "Nothing submitted, no changes made."
-                                                                                
+
   if request.form['type'] != 'ip' and  not request.form['resourceName']:
     return "Please enter a name for the resource."
   types = {'ip': addIp,
@@ -2158,8 +2172,8 @@
            'nfsc': addNfsx,
            'scr': addScr,
            'smb': addSmb}
-  
-  
+
+
   type = request.form["type"]
   res = types[type](request)
   modelb = request.SESSION.get('model')
@@ -2195,12 +2209,12 @@
 def getResourceForEdit(modelb, name):
   resPtr = modelb.getResourcesPtr()
   resources = resPtr.getChildren()
-                                                                               
+
   for res in resources:
     if res.getName() == name:
       resPtr.removeChild(res)
       break
-                                                                              
+
   return res
 
 def addIp(request):
@@ -2236,20 +2250,20 @@
     res.attr_hash["force_unmount"] = '1'
   else:
     res.attr_hash["force_unmount"] = '0'
-                                                                                
+
   if form.has_key('selffence'):
     res.attr_hash["self_fence"] = '1'
   else:
     res.attr_hash["self_fence"] = '0'
-                                                                                
+
   if form.has_key('checkfs'):
     res.attr_hash["force_fsck"] = '1'
   else:
     res.attr_hash["force_fsck"] = '0'
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addGfs(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2262,12 +2276,12 @@
   res.attr_hash["device"] = form["device"]
   res.attr_hash["options"] = form["options"]
   res.attr_hash["fsid"] = form["fsid"]
-                                                                                
+
   if form.has_key('forceunmount'):
     res.attr_hash["force_unmount"] = '1'
   else:
     res.attr_hash["force_unmount"] = '0'
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
 
@@ -2284,15 +2298,15 @@
   res.attr_hash["options"] = form["options"]
   res.attr_hash["exportpath"] = form["export"]
   res.attr_hash["nfstype"] = form["fstype"]
-                                                                                
+
   if form.has_key('forceunmount'):
     res.attr_hash["force_unmount"] = '1'
   else:
     res.attr_hash["force_unmount"] = '0'
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addNfsc(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2303,10 +2317,10 @@
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["target"] = form["target"]
   res.attr_hash["options"] = form["options"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addNfsx(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2315,7 +2329,7 @@
     res = apply(NFSExport)
   form = request.form
   res.attr_hash["name"] = form["resourceName"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
 
@@ -2328,10 +2342,10 @@
   form = request.form
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["file"] = form["file"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addSmb(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2341,16 +2355,15 @@
   form = request.form
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["workgroup"] = form["workgroup"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def appendModel(request, model):
-  try:
-    request.SESSION.set('model', model)
-  except:
-    pass
-  return
+	try:
+		request.SESSION.set('model', model)
+	except:
+		pass
 
 def resolve_nodename(self, clustername, nodename):
   path = CLUSTER_FOLDER_PATH + clustername


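Apart from the nested GET/POST fallback added to getResourceInfo and the reindented appendModel at the end, this revision is trailing-whitespace cleanup; its context lines are, however, the clearest view in the thread of the menu structure the chooser code builds: plain dicts carrying 'Title', 'cfg_type', 'currentItem', optional 'show_children', and nested 'children' lists. A stripped-down sketch of the Services branch (keys taken from the hunks above; each entry also carries a URL assembled from the base URL and pagetype in lines not shown here):

    svadd = {
        'Title': 'Add a Service',
        'cfg_type': 'serviceadd',
        'currentItem': False,      # True when this page is being viewed
    }
    svcfg = {
        'Title': 'Configure a Service',
        'cfg_type': 'servicecfg',
        'show_children': True,     # expand the submenu in the chooser
        'currentItem': False,
        'children': [],            # one dict per existing service
    }
    sv = {
        'Title': 'Services',
        'currentItem': False,
        'children': [svadd, svcfg],
    }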

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-28 22:04 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-09-28 22:04 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-09-28 22:04:27

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	flesh out more stubs

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.75&r2=1.76

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/28 20:10:29	1.75
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/28 22:04:27	1.76
@@ -330,7 +330,7 @@
 	try:
 		interval = int(form['interval'])
 		if interval < 0:
-			raise ValueError('Interval must be 0 or greater')
+			raise ValueError('Interval must be 0 or greater.')
 	except KeyError, e:
 		errors.append('No Interval value was given.')
 	except ValueError, e:
@@ -365,18 +365,71 @@
 
 	try:
 		device = form['device'].strip()
+		if not device:
+			raise KeyError('device')
 	except KeyError, e:
 		errors.append('No Device value was given.')
 
 	try:
 		label = form['label'].strip()
+		if not label:
+			raise KeyError('label')
 	except KeyError, e:
 		errors.append('No Label value was given.')
 
+	num_heuristics = 0
+	try:
+		num_heuristics = int(form['num_heuristics'])
+		if num_heuristics < 0:
+			raise ValueError(form['num_heuristics'])
+		if num_heuristics == 0:
+			num_heuristics = 1
+	except KeyError, e:
+		errors.append('No number of heuristics was given.')
+	except ValueError, e:
+		errors.append('An invalid number of heuristics was given: ' + e)
+
+	heuristics = list()
+	for i in xrange(num_heuristics):
+		prefix = 'heuristic' + str(i) + ':'
+		try:
+			hname = form[prefix + 'hname'].strip()
+			if not hname:
+				raise KeyError(prefix + 'hname')
+		except KeyError, e:
+			if ((not prefix + 'hpath' in form or not form[prefix + 'hpath'].strip()) and
+				(not prefix + 'hint' in form or not form[prefix + 'hint'].strip()) and
+				(not prefix + 'hscore' in form or not form[prefix + 'hscore'].strip())):
+				# The row is blank; ignore it.
+				continue
+			errors.append('No heuristic name was given for heuristic #' + str(i + 1))
+
+		try:
+			hpath = form[prefix + 'hpath']
+		except KeyError, e:
+			errors.append('No heuristic path was given for heuristic #' + str(i + 1))
+
+		try:
+			hint = int(form[prefix + 'hint'])
+			if hint < 1:
+				raise ValueError('Heuristic interval values must be greater than 0.')
+		except KeyError, e:
+			errors.append('No heuristic interval was given for heuristic #' + str(i + 1))
+		except ValueError, e:
+			errors.append('An invalid heuristic interval was given for heuristic #' + str(i + 1) + ': ' + e)
+
+		try:
+			hscore = int(form[prefix + 'hscore'])
+			if hscore < 1:
+				raise ValueError('Heuristic scores must be greater than 0.')
+		except KeyError, e:
+			errors.append('No heuristic score was given for heuristic #' + str(i + 1))
+		except ValueError, e:
+			errors.append('An invalid heuristic score was given for heuristic #' + str(i + 1) + ': ' + e)
+		heuristics.append([ hname, hpath, hint, hscore ])
+
 	if len(errors) > 0:
 		return (False, {'errors': errors })
-
-	# heur: heuristicN:hname heuristicN:hprog heuristicN:hint heuristicN:score
 	return (True, {'messages': 'Changes accepted. - FILL ME IN'})
 
 def validateGeneralConfig(self, form):
@@ -384,6 +437,8 @@
 
 	try:
 		cluster_name = form['cluname'].strip()
+		if not cluster_name:
+			raise KeyError('cluname')
 	except KeyError, e:
 		errors.append('No cluster name was given.')
 


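The new heuristics loop reads rows of flat form fields named 'heuristic<N>:hname', 'heuristic<N>:hpath', 'heuristic<N>:hint', and 'heuristic<N>:hscore', skipping rows that are entirely blank and complaining about rows that are only partially filled in. A trimmed sketch of the collection step (per-field validation and the int() conversions for hint and hscore elided; assumes form behaves like a dict):

    heuristics = list()
    for i in xrange(num_heuristics):
        prefix = 'heuristic' + str(i) + ':'
        row = []
        for field in ('hname', 'hpath', 'hint', 'hscore'):
            try:
                row.append(form[prefix + field].strip())
            except KeyError:
                row.append('')
        if not filter(None, row):
            continue    # wholly blank row: ignore it, as the patch does
        heuristics.append(row)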

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-28 20:10 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-09-28 20:10 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-09-28 20:10:29

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	flesh out more of the backend form handlers

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.74&r2=1.75

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 18:46:08	1.74
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/28 20:10:29	1.75
@@ -1,3 +1,4 @@
+import socket
 from ModelBuilder import ModelBuilder
 from ZPublisher import HTTPRequest
 import AccessControl
@@ -278,8 +279,185 @@
 def validateResourceEdit(self, request):
 	return (True, {})
 
+
+## Cluster properties form validation routines
+
+def validateMCastConfig(self, form):
+	try:
+		mcast_val = form['mcast'].strip().lower()
+		if mcast_val != 'true' and mcast_val != 'false':
+			raise KeyError(mcast_val)
+		if mcast_val == 'true':
+			mcast_val = 1
+		else:
+			mcast_val = 0
+	except KeyError, e:
+		return (False, {'errors': 'An invalid multicast selection was made.'})
+
+	if not mcast_val:
+		return (True, {'messages': 'Changes accepted. - FILL ME IN'})
+
+	try:
+		addr_str = form['mcast_addr'].strip()
+		socket.inet_pton(socket.AF_INET, addr_str)
+	except KeyError, e:
+		return (False, {'errors': 'No multicast address was given'})
+	except socket.error, e:
+		try:
+			socket.inet_pton(socket.AF_INET6, addr_str)
+		except socket.error, e6:
+			return (False, {'errors': 'An invalid multicast address was given: ' + e})
+
+	return (True, {'messages': 'Changes accepted. - FILL ME IN'})
+
+def validateQDiskConfig(self, form):
+	errors = list()
+
+	try:
+		qdisk_val = form['quorumd'].strip().lower()
+		if qdisk_val != 'true' and qdisk_val != 'false':
+			raise KeyError(qdisk_val)
+		if qdisk_val == 'true':
+			qdisk_val = 1
+		else:
+			qdisk_val = 0
+	except KeyError, e:
+		return (False, {'errors': 'An invalid quorum partition selection was made.'})
+
+	if not qdisk_val:
+		return (True, {'messages': 'Changes accepted. - FILL ME IN'})
+
+	try:
+		interval = int(form['interval'])
+		if interval < 0:
+			raise ValueError('Interval must be 0 or greater')
+	except KeyError, e:
+		errors.append('No Interval value was given.')
+	except ValueError, e:
+		errors.append('An invalid Interval value was given: ' + e)
+
+	try:
+		votes = int(form['votes'])
+		if votes < 1:
+			raise ValueError('Votes must be greater than 0')
+	except KeyError, e:
+		errors.append('No Votes value was given.')
+	except ValueError, e:
+		errors.append('An invalid Votes value was given: ' + e)
+
+	try:
+		tko = int(form['tko'])
+		if tko < 0:
+			raise ValueError('TKO must be 0 or greater')
+	except KeyError, e:
+		errors.append('No TKO value was given.')
+	except ValueError, e:
+		errors.append('An invalid TKO value was given: ' + e)
+
+	try:
+		min_score = int(form['min_score'])
+		if min_score < 1:
+			raise ValueError('Minimum Score must be greater than 0')
+	except KeyError, e:
+		errors.append('No Minimum Score value was given.')
+	except ValueError, e:
+		errors.append('An invalid Minimum Score value was given: ' + e)
+
+	try:
+		device = form['device'].strip()
+	except KeyError, e:
+		errors.append('No Device value was given.')
+
+	try:
+		label = form['label'].strip()
+	except KeyError, e:
+		errors.append('No Label value was given.')
+
+	if len(errors) > 0:
+		return (False, {'errors': errors })
+
+	# heur: heuristicN:hname heuristicN:hprog heuristicN:hint heuristicN:score
+	return (True, {'messages': 'Changes accepted. - FILL ME IN'})
+
+def validateGeneralConfig(self, form):
+	errors = list()
+
+	try:
+		cluster_name = form['cluname'].strip()
+	except KeyError, e:
+		errors.append('No cluster name was given.')
+
+	try:
+		version_num = int(form['cfgver'])
+		if version_num < 0:
+			raise ValueError('configuration version numbers must be 0 or greater.')
+	except KeyError, e:
+		errors.append('No cluster configuration version was given.')
+	except ValueError, e:
+		errors.append('An invalid configuration version was given: ' + e)
+
+	if len(errors) > 0:
+		return (False, {'errors': errors})
+
+	return (True, {'messages': 'Changes accepted. - FILL ME IN'})
+
+def validateFenceConfig(self, form):
+	errors = list()
+
+	try:
+		post_fail_delay = int(form['post_fail_delay'])
+		if post_fail_delay < 0:
+			raise ValueError('post fail delay values must be 0 or greater.')
+	except KeyError, e:
+		errors.append('No post fail delay was given.')
+	except ValueError, e:
+		errors.append('Invalid post fail delay: ' + e)
+
+	try:
+		post_join_delay = int(form['post_join_delay'])
+		if post_join_delay < 0:
+			raise ValueError('post join delay values must be 0 or greater.')
+	except KeyError, e:
+		errors.append('No post join delay was given.')
+	except ValueError, e:
+		errors.append('Invalid post join delay: ' + e)
+
+	if len(errors) > 0:
+		return (False, {'errors': errors })
+
+	return (True, {'messages': 'Changes accepted. - FILL ME IN'})
+
+configFormValidators = {
+	'general': validateGeneralConfig,
+	'mcast': validateMCastConfig,
+	'fence': validateFenceConfig,
+	'qdisk': validateQDiskConfig
+}
+
 def validateConfigCluster(self, request):
-	return (True, {})
+	errors = list()
+	messages = list()
+
+	if not 'form' in request:
+		return (False, {'errors': 'No form was submitted.' })
+	if not 'configtype' in request.form:
+		return (False, {'errors': 'No configuration type was submitted.' })
+	if not request.form['configtype'] in configFormValidators:
+		return (False, {'errors': 'An invalid configuration type was submitted.' })
+
+	val = configFormValidators[request.form['configtype']]
+	ret = val(self, request.form)
+
+	retcode = ret[0]
+	if 'errors' in ret[1]:
+		errors.extend(ret[1]['errors'])
+	if 'messages' in ret[1]:
+		messages.extend(ret[1]['messages'])
+
+	if len(errors) < 1:
+		messages.append('The cluster properties have been updated.')
+
+	return (retcode, {'errors': errors, 'messages': messages})
 
 def validateFenceAdd(self, request):
 	return (True, {})
@@ -296,7 +474,7 @@
 	31: validateResourceAdd,
 	33: validateResourceEdit,
 	51: validateFenceAdd,
-	50: validateFenceEdit
+	50: validateFenceEdit,
 }
 
 def validatePost(self, request):


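Every validator added here returns a (success, payload) pair, where payload may carry 'errors' and/or 'messages'; validateConfigCluster dispatches on form['configtype'] and merges the payload with extend(). Two caveats are worth noting: several validators return 'errors' or 'messages' as a bare string, which extend() splices into the result list character by character, and the '+ e' concatenations will raise TypeError on Python 2 exception instances unless wrapped in str(). A sketch of the contract with both points repaired (the validator name and 'example' form key are hypothetical):

    def validateExampleConfig(self, form):
        errors = list()

        try:
            value = int(form['example'])
            if value < 0:
                raise ValueError('example must be 0 or greater.')
        except KeyError, e:
            errors.append('No example value was given.')
        except ValueError, e:
            errors.append('An invalid example value was given: ' + str(e))

        if len(errors) > 0:
            return (False, {'errors': errors})
        return (True, {'messages': ['Changes accepted.']})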

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-27 18:46 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-09-27 18:46 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-09-27 18:46:08

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	stub in some validation functions

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.73&r2=1.74

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 16:18:03	1.73
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 18:46:08	1.74
@@ -266,9 +266,37 @@
 	buildClusterCreateFlags(self, batch_id_map, clusterName)
 	return (True, {'errors': errors, 'messages': messages})
 
+def validateServiceEdit(self, request):
+	return (True, {})
+
+def validateServiceAdd(self, request):
+	return (True, {})
+
+def validateResourceAdd(self, request):
+	return (True, {})
+	
+def validateResourceEdit(self, request):
+	return (True, {})
+
+def validateConfigCluster(self, request):
+	return (True, {})
+
+def validateFenceAdd(self, request):
+	return (True, {})
+
+def validateFenceEdit(self, request):
+	return (True, {})
+
 formValidators = {
 	6: validateCreateCluster,
-	15: validateAddClusterNode
+	7: validateConfigCluster,
+	15: validateAddClusterNode,
+	21: validateServiceAdd,
+	24: validateServiceEdit,
+	31: validateResourceAdd,
+	33: validateResourceEdit,
+	51: validateFenceAdd,
+	50: validateFenceEdit
 }
 
 def validatePost(self, request):


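The formValidators table keys each validator by the numeric pagetype the form posts back (6 for cluster creation, 7 for cluster configuration, and so on). A sketch of the dispatch validatePost presumably performs over it; its body is not shown in these hunks, and the 'pagetype' field name is an assumption:

    def validatePost(self, request):
        try:
            pagetype = int(request.form['pagetype'])
        except (KeyError, ValueError):
            return None
        if not pagetype in formValidators:
            return None
        return formValidators[pagetype](self, request)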

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-27 16:18 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-09-27 16:18 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-09-27 16:18:04

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	changed method calls to correct ones

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.72&r2=1.73

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 15:51:36	1.72
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 16:18:03	1.73
@@ -1808,8 +1808,8 @@
   ricci_agent = resolve_nodename(self, clustername, ragent)
   rc = RicciCommunicator(ricci_agent)
   map = {}
-  map['os'] = rc.getOS()
-  map['isVirtualized'] = False
+  map['os'] = rc.os()
+  map['isVirtualized'] = rc.dom0()
   return map
 
 def getResourcesInfo(modelb, request):


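This is the last of three successive one-line repairs to the same function; the two messages below fix the constructor name and a missing self argument. After all three, the function reads:

    def getClusterOS(self, ragent, request):
        clustername = request['clustername']
        ricci_agent = resolve_nodename(self, clustername, ragent)
        rc = RicciCommunicator(ricci_agent)
        map = {}
        map['os'] = rc.os()
        map['isVirtualized'] = rc.dom0()
        return map

The 2006-10-02 revision further up the page then wraps the clustername lookup in the GET/POST fallback.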

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-27 15:51 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-09-27 15:51 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-09-27 15:51:36

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fixed misspelling

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.71&r2=1.72

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 15:35:09	1.71
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 15:51:36	1.72
@@ -1806,7 +1806,7 @@
 def getClusterOS(self, ragent, request):
   clustername = request['clustername']
   ricci_agent = resolve_nodename(self, clustername, ragent)
-  rc = ricci_communicator(ricci_agent)
+  rc = RicciCommunicator(ricci_agent)
   map = {}
   map['os'] = rc.getOS()
   map['isVirtualized'] = False



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-27 15:35 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-09-27 15:35 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-09-27 15:35:09

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	self-referential bug fix

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.70&r2=1.71

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/25 22:59:15	1.70
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/27 15:35:09	1.71
@@ -1805,7 +1805,7 @@
 
 def getClusterOS(self, ragent, request):
   clustername = request['clustername']
-  ricci_agent = resolve_nodename(clustername, ragent)
+  ricci_agent = resolve_nodename(self, clustername, ragent)
   rc = ricci_communicator(ricci_agent)
   map = {}
   map['os'] = rc.getOS()



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-25 22:59 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-09-25 22:59 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-09-25 22:59:15

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py ricci_bridge.py 

Log message:
	add node stuff

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.69&r2=1.70
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.20&r2=1.21
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.18&r2=1.19

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/25 21:00:14	1.69
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/25 22:59:15	1.70
@@ -23,7 +23,7 @@
 #then only display chooser if the current user has 
 #permissions on at least one. If the user is admin, show ALL clusters
 
-from homebase_adapters import nodeAuth, nodeUnauth, manageCluster
+from homebase_adapters import nodeAuth, nodeUnauth, manageCluster, createClusterSystems
 
 CLUSTER_FOLDER_PATH = '/luci/systems/cluster/'
 
@@ -151,6 +151,7 @@
 			cluster_properties['isComplete'] = False
 			errors.append(error)
 			return (False, {'errors': errors, 'requestResults':cluster_properties })
+
 		batch_id_map = {}
 		for i in nodeList:
 			try:
@@ -189,7 +190,8 @@
 
 def validateAddClusterNode(self, request):
 	errors = list()
-	messages = list()
+	messages = list() 
+	requestResults = {}
 
 	try:
 	 	sessionData = request.SESSION.get('checkRet')
@@ -199,7 +201,7 @@
 	if 'clusterName' in request.form:
 		clusterName = request.form['clusterName']
 	else:
-		return (False, {'errors': [ 'Cluster name is missing'] })
+		return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
 
 	try:
 		numStorage = int(request.form['numStorage'])
@@ -207,7 +209,7 @@
 			raise
 	except:
 		errors.append('You must specify at least one node to add to the cluster')
-		return (False, {'errors': [ errors ] })
+		return (False, {'errors': [ errors ], 'requestResults': requestResults })
 
 	ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
 	errors.extend(ret[0])
@@ -225,18 +227,43 @@
 
 	i = 0
 	while i < len(nodeList):
+		clunode = nodeList[i]
 		try:
-			x = 0 # ricci call succeeds
-			messages.append('Cluster join initiated for host \"' + i['ricci_host'] + '\"')
+			batchNode = addClusterNodeBatch(clusterName, True, False, False)
+			if not batchNode:
+				raise
 			del nodeList[i]
 		except:
-			i['errors'] = True
-			errors.append('Unable to initiate node creation for host \"' + i['ricci_host'] + '\"')
-			cluster_properties['isComplete'] = 0
+			clunode['errors'] = True
+			nodeUnauth(nodeList)
+			cluster_properties['isComplete'] = False
+			errors.append('Unable to initiate node creation for host \"' + clunode['ricci_host'] + '\"')
 
 	if not cluster_properties['isComplete']:
 		return (False, {'errors': errors, 'requestResults': cluster_properties})
 
+	error = createClusterSystems(self, clusterName, nodeList)
+	if error:
+		nodeUnauth(nodeList)
+		cluster_properties['isComplete'] = False
+		errors.append(error)
+		return (False, {'errors': errors, 'requestResults': cluster_properties})
+
+	batch_id_map = {}
+	for i in nodeList:
+		clunode = nodeList[i]
+		try:
+			rc = RicciCommunicator(clunode['ricci_host'])
+			resultNode = rc.process_batch(batchNode, async=True)
+			batch_id_map[clunode['ricci_host']] = resultNode.getAttribute('batch_id')
+			messages.append('Cluster join initiated for host \"' + clunode['ricci_host'] + '\"')
+		except:
+			nodeUnauth(nodeList)
+			cluster_properties['isComplete'] = False
+			errors.append('An error occurred while attempting to add cluster node \"' + clunode['ricci_host'] + '\"')
+			return (False, {'errors': errors, 'requestResults': cluster_properties})
+
+	buildClusterCreateFlags(self, batch_id_map, clusterName)
 	return (True, {'errors': errors, 'messages': messages})
 
 formValidators = {
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/08/01 16:27:53	1.20
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/09/25 22:59:15	1.21
@@ -986,6 +986,92 @@
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except: pass
 
+def createClusterSystems(self, clusterName, nodeList):
+	try:
+		clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		if not clusterObj:
+			raise
+	except:
+		nodeUnauth(nodeList)
+		return 'No cluster named \"' + clusterName + '\" is managed by Luci'
+
+	for i in nodeList:
+		if 'ricci_host' in i:
+			host = str(i['ricci_host'])
+		else:
+			host = str(i['host'])
+
+		try:
+			clusterObj.manage_addFolder(host, '__luci__:csystem:' + clusterName)
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
+			if not newSystem:
+				raise
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except:
+			nodeUnauth(nodeList)
+			return 'Unable to create cluster node \"' + host + '\" for cluster \"' + clusterName + '\"'
+
+	try:
+		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+		if not ssystem:
+			raise
+	except:
+		return
+
+	# Only add storage systems if the cluster and cluster node DB
+	# objects were added successfully.
+	for i in nodeList:
+		if 'ricci_host' in i:
+			host = str(i['ricci_host'])
+		else:
+			host = str(i['host'])
+
+		try:
+			# It's already there, as a storage system, no problem.
+			exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			continue
+		except: pass
+
+		try:
+			ssystem.manage_addFolder(host, '__luci__:system')
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except: pass
+
+def delSystem(self, systemName):
+	try:
+		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+	except:
+		return 'Unable to find storage system \"' + systemName + '\"'
+
+	try:
+		rc = RicciCommunicator(systemName)
+		if not rc:
+			raise
+	except:
+		return 'Unable to connect to the ricci agent on \"' + systemName + '\" to unauthenticate'
+
+	# Only unauthenticate if the system isn't a member of
+	# a managed cluster.
+	cluster_info = rc.cluster_info()
+	if not cluster_info[0]:
+		try: rc.unauth()
+		except: pass
+	else:
+		try:
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + cluster_info[0] + '/' + rc.system_name())
+		except:
+			try: rc.unauth()
+			except: pass
+
+	try:
+		ssystem.manage_delObjects([systemName])
+	except:
+		return 'Unable to delete storage system \"' + systemName + '\"'
+
+
 def delSystem(self, systemName):
 	try:
 		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
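
The unauthentication guard in the new delSystem above amounts to: drop the ricci credentials only when the agent does not report membership in a managed cluster. A condensed sketch (the hostname and import path are assumptions; cluster_info() and unauth() are the agent calls used above):

    from ricci_communicator import RicciCommunicator

    rc = RicciCommunicator('storage1.example.com')   # hypothetical host
    cluster_name = rc.cluster_info()[0]
    if not cluster_name:
        # not a member of any managed cluster, so drop the credentials
        rc.unauth()
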
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/09/23 04:04:08	1.18
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/09/25 22:59:15	1.19
@@ -594,6 +594,57 @@
     #parse out log entry  
     return payload
 
+def addClusterNodeBatch(cluster_name, services, shared_storage, LVS):
+	batch = '<?xml version="1.0" ?>'
+	batch += '<batch>'
+	batch += '<module name="rpm">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="install">'
+	batch += '<var name="sets" type="list_xml">'
+	batch += '<set name="Cluster Base"/>'
+	if services:
+		batch += '<set name="Cluster Service Manager"/>'
+	if shared_storage:
+		batch += '<set name="Clustered Storage"/>'
+	if LVS:
+		batch += '<set name="Linux Virtual Server"/>'
+	batch += '</var>'
+	batch += '</function_call>'
+	batch += '</request>'
+	batch += '</module>'
+
+	batch += '<module name="reboot">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="reboot_now"/>'
+	batch += '</request>'
+	batch += '</module>'
+
+	batch += '<module name="cluster">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="set_cluster.conf">'
+	batch += '<var mutable="false" name="propagate" type="boolean" value="false"/>'
+	batch += '<var mutable="false" name="cluster.conf" type="xml">'
+	batch += '<cluster config_version="1" name="' + cluster_name + '">'
+	batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
+	batch += '<clusternodes/>'
+	batch += '<cman/>'
+	batch += '<fencedevices/>'
+	batch += '<rm/>'
+	batch += '</cluster>'
+	batch += '</var>'
+	batch += '</function_call>'
+	batch += '</request>'
+	batch += '</module>'
+
+	batch += '<module name="cluster">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="start_node"/>'
+	batch += '</request>'
+	batch += '</module>'
+	batch += '</batch>'
+
+	return minidom.parseString(batch).firstChild
+
 def createClusterBatch(cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
     batch = '<?xml version="1.0" ?>'
     batch += '<batch>'



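addClusterNodeBatch builds the batch document by string concatenation and relies on minidom.parseString to catch malformed XML; .firstChild is then the <batch> element itself. For comparison, one of the modules could equally be assembled through minidom's DOM API (an illustrative sketch, not how ricci_bridge is actually written):

    from xml.dom import minidom

    doc = minidom.Document()
    batch = doc.createElement('batch')
    doc.appendChild(batch)
    module = doc.createElement('module')
    module.setAttribute('name', 'reboot')
    batch.appendChild(module)
    request = doc.createElement('request')
    request.setAttribute('API_version', '1.0')
    module.appendChild(request)
    call = doc.createElement('function_call')
    call.setAttribute('name', 'reboot_now')
    request.appendChild(call)
    # prints the same <module name="reboot"> fragment built by hand above
    print doc.documentElement.toxml()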
^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-09-22 18:24 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-09-22 18:24 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-09-22 18:24:45

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	small tweaks

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.65&r2=1.66

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/14 21:24:25	1.65
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/22 18:24:45	1.66
@@ -972,7 +972,7 @@
       
   hmap['resource_list'] = resource_list
   hmap['root_uuid'] = root_uuid
-  hmap['uuid_list'] = map(lambda x: make_uuid('resource'), range(30))
+  hmap['uuid_list'] = map(lambda x: make_uuid('resource'), xrange(30))
   return hmap
 
 def recurse_resources(parent_uuid, child, resource_list, indent_ctr, parent=None):
@@ -997,13 +997,13 @@
   rc_map['uuid'] = make_uuid('resource')
   rc_map['parent_uuid'] = parent_uuid
 
-  new_indent_ctr = indent_ctr + 1
-
   resource_list.append(rc_map)
   kids = child.getChildren()
   child_depth = 0
+  new_indent_ctr = indent_ctr + 1
   for kid in kids:
-    child_depth = recurse_resources(rc_map['uuid'], kid, resource_list, new_indent_ctr, child)
+    cdepth = recurse_resources(rc_map['uuid'], kid, resource_list, new_indent_ctr, child)
+    child_depth = max(cdepth, child_depth)
 
   rc_map['max_depth'] = child_depth
   return child_depth + 1

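The corrected recursion above returns 1 plus the maximum depth over all children, rather than 1 plus the depth of whichever child happened to be visited last. Stripped of the resource-map bookkeeping, the depth computation reduces to the following (a standalone sketch; TreeNode is a stand-in for the model's resource objects):

    class TreeNode:
        def __init__(self, children=None):
            self.children = children or []
        def getChildren(self):
            return self.children

    def max_depth(node):
        # a leaf contributes depth 1; an inner node is 1 + its deepest child
        kids = node.getChildren()
        if not kids:
            return 1
        return 1 + max(max_depth(kid) for kid in kids)

    tree = TreeNode([TreeNode(), TreeNode([TreeNode()])])
    print max_depth(tree)   # -> 3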


^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-30 22:59 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-08-30 22:59 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-08-30 22:59:02

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	add a max_depth field for resources to allow for proper nesting of tags for the ui

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.61&r2=1.62

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/22 17:46:04	1.61
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/30 22:59:01	1.62
@@ -989,12 +989,6 @@
     rc_map['type'] = child.getResourceType()
 
   rc_map['indent_ctr'] = indent_ctr
-  indent_str = '_'
-  i = indent_ctr
-  while i>0:
-    indent_str += '__'
-    i -= 1
-  rc_map['indent_str'] = indent_str
     
   #Note: Final version needs all resource attrs
   rc_map['attrs'] = child.getAttributes()
@@ -1003,10 +997,12 @@
 
   resource_list.append(rc_map)
   kids = child.getChildren()
+  child_depth = 0
   for kid in kids:
-    recurse_resources(kid, resource_list, new_indent_ctr, child)
+    child_depth = recurse_resources(kid, resource_list, new_indent_ctr, child)
 
-  return
+  rc_map['max_depth'] = child_depth
+  return child_depth + 1
     
 def serviceStart(self, ricci_agent, req):
   rb = ricci_bridge(ricci_agent)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-22 17:46 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-22 17:46 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-22 17:46:05

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix typo with nav portlet

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.60&r2=1.61

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/22 17:41:17	1.60
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/22 17:46:04	1.61
@@ -313,7 +313,7 @@
   #test...
   #clcfg['show_children'] = True
   #Add all cluster type pages here:
-  if pagetype == CLUSTER or pagetype == CLUSTER_ADD or pagetype == CLUSTER_CONFIG:
+  if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
     clcfg['show_children'] = True
   else:
     clcfg['show_children'] = False
@@ -399,7 +399,7 @@
   ndcfg['cfg_type'] = "nodecfg"
   ndcfg['absolute_url'] = url + "?pagetype=" + NODE_CONFIG + "&clustername=" + cluname
   ndcfg['Description'] = "Configure cluster nodes"
-  if pagetype == NODE_CONFIG or pagetype == NODE or pagetype == NODES or pagetype == NODE_LIST or pagetype == NODEGRID or pagetype == NODE_ADD:
+  if pagetype == NODE_CONFIG or pagetype == NODE or pagetype == NODES or pagetype == NODE_LIST or pagetype == NODE_GRID or pagetype == NODE_ADD:
     ndcfg['show_children'] = True
   else:
     ndcfg['show_children'] = False



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-22 17:41 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-22 17:41 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-22 17:41:17

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix minor cosmetic issue with nav portlet

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.59&r2=1.60

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 23:40:27	1.59
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/22 17:41:17	1.60
@@ -312,7 +312,8 @@
 
   #test...
   #clcfg['show_children'] = True
-  if pagetype == CLUSTERS:
+  #Add all cluster type pages here:
+  if pagetype == CLUSTER or pagetype == CLUSTER_ADD or pagetype == CLUSTER_CONFIG:
     clcfg['show_children'] = True
   else:
     clcfg['show_children'] = False
@@ -398,7 +399,7 @@
   ndcfg['cfg_type'] = "nodecfg"
   ndcfg['absolute_url'] = url + "?pagetype=" + NODE_CONFIG + "&clustername=" + cluname
   ndcfg['Description'] = "Configure cluster nodes"
-  if pagetype == NODE_CONFIG or pagetype == NODE:
+  if pagetype == NODE_CONFIG or pagetype == NODE or pagetype == NODES or pagetype == NODE_LIST or pagetype == NODEGRID or pagetype == NODE_ADD:
     ndcfg['show_children'] = True
   else:
     ndcfg['show_children'] = False



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-16 23:40 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-16 23:40 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-16 23:40:27

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	small nits

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.58&r2=1.59

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 21:56:11	1.58
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 23:40:27	1.59
@@ -1103,7 +1103,7 @@
       slist.append(item)
   fdomlist = list()
   clustername = request['clustername']
-  baseurl = req['URL']
+  baseurl = request['URL']
   fdoms = modelb.getFailoverDomains()
   svcs = modelb.getServices()
   for fdom in fdoms:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-16 21:56 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-16 21:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-16 21:56:11

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	typo

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.57&r2=1.58

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 21:54:14	1.57
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 21:56:11	1.58
@@ -1109,7 +1109,7 @@
   for fdom in fdoms:
     fdom_map = {}
     fdom_map['name'] = fdom.getName()
-    fdom_map['cfgurl'] = baseurl + "?pagetype=" + FDOM_LIST + "&clustername=" clustername
+    fdom_map['cfgurl'] = baseurl + "?pagetype=" + FDOM_LIST + "&clustername=" + clustername
     ordered_attr = fdom.getAttribute('ordered')
     restricted_attr = fdom.getAttribute('restricted')
     if ordered_attr != None and (ordered_attr == "true" or ordered_attr == "1"):



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-16 21:54 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-16 21:54 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-16 21:54:14

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fixed indent error

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.56&r2=1.57

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 21:51:46	1.56
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 21:54:14	1.57
@@ -1097,10 +1097,10 @@
   slist = list()
   nlist = list()
   for item in clustatus:
-  if item['type'] == "node":
-    nlist.append(item)
-  elif item['type'] == "service":
-    slist.append(item)
+    if item['type'] == "node":
+      nlist.append(item)
+    elif item['type'] == "service":
+      slist.append(item)
   fdomlist = list()
   clustername = request['clustername']
   baseurl = req['URL']



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-16 21:51 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-16 21:51 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-16 21:51:46

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fdom support

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.55&r2=1.56

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 19:14:03	1.55
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 21:51:46	1.56
@@ -1093,13 +1093,23 @@
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
 
-def getFdomsInfo(self, modelb, request):
-  map = {}
+def getFdomsInfo(self, modelb, request, clustatus):
+  slist = list()
+  nlist = list()
+  for item in clustatus:
+  if item['type'] == "node":
+    nlist.append(item)
+  elif item['type'] == "service":
+    slist.append(item)
+  fdomlist = list()
+  clustername = request['clustername']
+  baseurl = req['URL']
   fdoms = modelb.getFailoverDomains()
   svcs = modelb.getServices()
   for fdom in fdoms:
     fdom_map = {}
     fdom_map['name'] = fdom.getName()
+    fdom_map['cfgurl'] = baseurl + "?pagetype=" + FDOM_LIST + "&clustername=" clustername
     ordered_attr = fdom.getAttribute('ordered')
     restricted_attr = fdom.getAttribute('restricted')
     if ordered_attr != None and (ordered_attr == "true" or ordered_attr == "1"):
@@ -1111,12 +1121,46 @@
     else:
       fdom_map['restricted'] = False
     nodes = fdom.getChildren()
+    nodelist = list()
     for node in nodes:
       nodesmap = {}
-      nodesmap['nodename'] = node.getName() 
+      ndname = node.getName()
+      for nitem in nlist:
+        if nitem['name'] == ndname:
+          break
+      nodesmap['nodename'] = ndname 
+      nodesmap['nodecfgurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + ndname + "&pagetype=" + NODE
+      if nitem['clustered'] == "true":
+        nodesmap['status'] = NODE_ACTIVE
+      elif nitem['online'] == "false":
+        nodesmap['status'] = NODE_UNKNOWN
+      else:
+        nodesmap['status'] = NODE_INACTIVE
       priority_attr =  node.getAttribute('priority')
       if priority_attr != None:
-        nodesmap['priority'] = priority_attr
+        nodesmap['priority'] = "0"
+      nodelist.append(nodesmap)
+    fdom_map['nodeslist'] = nodelist
+
+    svclist = list()
+    for svc in svcs:
+      svcname = svc.getName()
+      for sitem in slist:
+        if sitem['name'] == svcname:
+          break  #found more info about service...
+
+      domain = svc.getAttribute("domain")
+      if domain != None:
+        if domain == fdom.getName():
+          svcmap = {}
+          svcmap['name'] = svcname
+          svcmap['status'] = sitem['running']
+          svcmap['svcurl'] = baseurl + "?pagetype=" + SERVICE + "&clustername=" + clustername + "&servicename=" + svcname
+          svcmap['location'] = sitem['nodename']
+          svclist.append(svcmap)
+    fdom_map['svclist'] = svclist
+    fdomlist.append(fdom_map)
+  return fdomlist
  
 def processClusterProps(self, ricci_agent, request):
   #First, retrieve cluster.conf from session


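The net effect of the getFdomsInfo rewrite above is that each failover domain is rendered as a map carrying its flags plus per-node and per-service sub-lists. One illustrative entry (all values hypothetical; the pagetype parameters are numeric constants from conga_constants.py in practice):

    fdom_map = {
        'name': 'webfarm',
        'cfgurl': '/luci/cluster?pagetype=FDOM_LIST&clustername=c1',
        'ordered': True,
        'restricted': False,
        'nodeslist': [
            {'nodename': 'node1',
             'status': '0',        # NODE_ACTIVE
             'nodecfgurl': '/luci/cluster?clustername=c1&nodename=node1'},
        ],
        'svclist': [
            {'name': 'httpd_svc',
             'status': 'true',     # the service's 'running' field
             'location': 'node1',
             'svcurl': '/luci/cluster?pagetype=SERVICE&clustername=c1'},
        ],
    }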

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-16 19:14 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-08-16 19:14 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-08-16 19:14:04

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix typo

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.54&r2=1.55

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 16:10:13	1.54
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 19:14:03	1.55
@@ -1116,7 +1116,7 @@
       nodesmap['nodename'] = node.getName() 
       priority_attr =  node.getAttribute('priority')
       if priority_attr != None:
-        nodesmap['priority'] = 
+        nodesmap['priority'] = priority_attr
  
 def processClusterProps(self, ricci_agent, request):
   #First, retrieve cluster.conf from session



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-16 16:10 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-16 16:10 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-16 16:10:13

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	os and virt specialization

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.53&r2=1.54

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/14 15:12:56	1.53
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/16 16:10:13	1.54
@@ -3,6 +3,7 @@
 import AccessControl
 from conga_constants import *
 from ricci_bridge import *
+from ricci_communicator import *
 import time
 import Products.ManagedSystem
 from Ip import Ip
@@ -1091,6 +1092,31 @@
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+
+def getFdomsInfo(self, modelb, request):
+  map = {}
+  fdoms = modelb.getFailoverDomains()
+  svcs = modelb.getServices()
+  for fdom in fdoms:
+    fdom_map = {}
+    fdom_map['name'] = fdom.getName()
+    ordered_attr = fdom.getAttribute('ordered')
+    restricted_attr = fdom.getAttribute('restricted')
+    if ordered_attr != None and (ordered_attr == "true" or ordered_attr == "1"):
+      fdom_map['ordered'] = True
+    else:
+      fdom_map['ordered'] = False
+    if restricted_attr != None and (restricted_attr == "true" or restricted_attr == "1"):
+      fdom_map['restricted'] = True
+    else:
+      fdom_map['restricted'] = False
+    nodes = fdom.getChildren()
+    for node in nodes:
+      nodesmap = {}
+      nodesmap['nodename'] = node.getName() 
+      priority_attr =  node.getAttribute('priority')
+      if priority_attr != None:
+        nodesmap['priority'] = 
  
 def processClusterProps(self, ricci_agent, request):
   #First, retrieve cluster.conf from session
@@ -1695,6 +1721,15 @@
       return map
   return map
 
+def getClusterOS(self, ragent, request):
+  clustername = request['clustername']
+  ricci_agent = resolve_nodename(clustername, ragent)
+  rc = ricci_communicator(ricci_agent)
+  map = {}
+  map['os'] = rc.getOS()
+  map['isVirtualized'] = False
+  return map
+
 def getResourcesInfo(modelb, request):
   resList = list()
   baseurl = request['URL']

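getClusterOS above resolves the cluster name to a ricci agent, asks that agent for its OS string, and hard-codes isVirtualized to False for the time being. A minimal usage sketch (hostname and import path are assumptions; getOS() is the agent call used above):

    from ricci_communicator import ricci_communicator

    rc = ricci_communicator('node1.example.com')   # hypothetical agent
    info = {'os': rc.getOS(), 'isVirtualized': False}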


^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-14 15:12 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-14 15:12 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-14 16:12:56

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Added Stan's log xml

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.52&r2=1.53

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 19:38:55	1.52
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/14 15:12:56	1.53
@@ -1650,15 +1650,8 @@
   nodename = request['nodename']
   clustername = request['clustername']
   nodename_resolved = resolve_nodename(self, clustername, nodename)
-  #rb = ricci_bridge(nodename_resolved)
-  #return rb.getNodeLogs()
-
-  str_buf = "Logging stuff <br/>"
-  file = open("./conga_constants.py", 'r')
-  lines = file.readlines()
-  for line in lines:
-    str_buf = str_buf + line + "<br/>"
-  return str_buf
+  rb = ricci_bridge(nodename_resolved)
+  return rb.getNodeLogs()
 
 def isClusterBusy(self, req):
   items = None



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 19:38 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 19:38 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 20:38:55

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	typo

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.51&r2=1.52

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 19:37:28	1.51
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 19:38:55	1.52
@@ -2000,6 +2000,6 @@
         nodefolder.manage_delObjects(item[0])
         return True
       else:
-        return False Not finished, so cannot remove flag
+        return False #Not finished, so cannot remove flag
 
   return True



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 19:37 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 19:37 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 20:37:28

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	code to remove defunct node flags

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.50&r2=1.51

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 18:36:01	1.50
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 19:37:28	1.51
@@ -1331,6 +1331,9 @@
     nodefolder = self.restrictedTraverse(path)
     batch_id = str(batch_number)
     objname = nodename_resolved + "____flag"
+    if noNodeFlagsPresent(self, nodefolder, objname, nodename_resolved) == False:
+      raise UnknownClusterError("Fatal", "An unfinished task flag exists for node %s" % nodename)
+
     nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #Now we need to annotate the new DB object
     objpath = path + "/" + objname
@@ -1985,3 +1988,18 @@
       return obj[0]
 
   return None
+
+def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
+  items = nodefolder.objectItems()
+  for item in items:
+    if item[0] == flagname:  #a flag already exists...
+      #try and delete it
+      rb = ricci_bridge(hostname)
+      finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
+      if finished == True:
+        nodefolder.manage_delObjects(item[0])
+        return True
+      else:
+        return False Not finished, so cannot remove flag
+
+  return True

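noNodeFlagsPresent completes the lifecycle of the "____flag" DB objects: a flag is created when an asynchronous batch is submitted, annotated with its BATCH_ID, and may only be removed once the batch has finished. The check condensed into one function (a sketch assuming ricci_bridge and BATCH_ID from the Extensions modules above):

    def flag_can_be_cleared(nodefolder, flagname, hostname):
        # True when no flag exists, or when a finished flag was just removed
        for name, obj in nodefolder.objectItems():
            if name == flagname:
                rb = ricci_bridge(hostname)
                if rb.checkBatch(obj.getProperty(BATCH_ID)) == True:
                    nodefolder.manage_delObjects(name)
                    return True
                return False   # batch still running, keep the flag
        return True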


^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 18:36 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 18:36 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 19:36:02

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	address some shortcomings in nodeinfo

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.49&r2=1.50

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 16:32:24	1.49
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 18:36:01	1.50
@@ -1487,6 +1487,11 @@
   infohash = {}
   baseurl = request['URL']
   nodestate = NODE_ACTIVE
+  svclist = list()
+  for thing in status:
+    if thing['type'] == "service":
+      svclist.append(thing)
+
   #Get cluster name and node name from request
   clustername = request['clustername']
   nodename = request['nodename']
@@ -1521,6 +1526,31 @@
     infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['fence_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
+
+  #figure out current services running on this node
+  svc_dict_list = list()
+  for svc in svclist:
+    if svc['nodename'] == nodename:
+      svc_dict = {}
+      svcname = svc['name']
+      svcurl = baseurl + "?" + PAGETYPE + "=" + SERVICE + "&" + CLUNAME + "=" + clustername + "&servicename=" + svcname
+      svc_dict['servicename'] = svcname
+      svc_dict['svcurl'] = svcurl
+      svc_dict_list.append(svc_dict)
+                                                                                
+  infohash['currentservices'] = svc_dict_list
+
+  #next is faildoms
+  fdoms = model.getFailoverDomainsForNode(nodename)
+  fdom_dict_list = list()
+  for fdom in fdoms:
+    fdom_dict = {}
+    fdom_dict['name'] = fdom.getName()
+    fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName()
+    fdom_dict['fdomurl'] = fdomurl
+    fdom_dict_list.append(fdom_dict)
+                                                                              
+  infohash['fdoms'] = fdom_dict_list
                                                                                 
   #return infohash
   infohash['d_states'] = None
@@ -1616,7 +1646,7 @@
 def getLogsForNode(self, request):
   nodename = request['nodename']
   clustername = request['clustername']
-  nodename_resolved = resolve_nodename(clustername, nodename)
+  nodename_resolved = resolve_nodename(self, clustername, nodename)
   #rb = ricci_bridge(nodename_resolved)
   #return rb.getNodeLogs()
 

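The getNodeInfo additions above derive the node's service list by filtering the cluster status list on type and then on nodename. The same filter in isolation (the sample data is hypothetical):

    status = [
        {'type': 'service', 'name': 'httpd_svc', 'nodename': 'node1'},
        {'type': 'service', 'name': 'db_svc', 'nodename': 'node2'},
        {'type': 'node', 'name': 'node1'},
    ]
    nodename = 'node1'
    svclist = [s for s in status if s['type'] == 'service']
    current = [s['name'] for s in svclist if s['nodename'] == nodename]
    print current   # -> ['httpd_svc']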


^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 16:32 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 16:32 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 17:32:24

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Had log url in wrong place

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.48&r2=1.49

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 16:15:28	1.48
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 16:32:24	1.49
@@ -1571,6 +1571,7 @@
       map['status'] = NODE_INACTIVE
       map['status_str'] = NODE_INACTIVE_STR
 
+    map['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + name + "&clustername=" + clustername                                                                            
     #set up URLs for dropdown menu...
     if map['status'] == NODE_ACTIVE:
       map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
@@ -1619,11 +1620,11 @@
   #rb = ricci_bridge(nodename_resolved)
   #return rb.getNodeLogs()
 
-  str_buf = ""
+  str_buf = "Logging stuff <br/>"
   file = open("./conga_constants.py", 'r')
   lines = file.readlines()
   for line in lines:
-  str_buf = str_buf + line + "<br/>"
+    str_buf = str_buf + line + "<br/>"
   return str_buf
 
 def isClusterBusy(self, req):



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 16:15 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 16:15 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 17:15:28

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py ricci_bridge.py 

Log message:
	logging link

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.47&r2=1.48
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.9&r2=1.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.15&r2=1.16

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 15:02:14	1.47
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 16:15:28	1.48
@@ -1534,7 +1534,8 @@
     dlist.append("rgmanager")
     states = rb.getDaemonStates(dlist)
     infohash['d_states'] = states
-                                                                                
+    
+  infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername                                                                            
   return infohash
   #get list of faildoms for node  
 
@@ -1611,6 +1612,20 @@
 
   return resultlist
 
+def getLogsForNode(self, request):
+  nodename = request['nodename']
+  clustername = request['clustername']
+  nodename_resolved = resolve_nodename(clustername, nodename)
+  #rb = ricci_bridge(nodename_resolved)
+  #return rb.getNodeLogs()
+
+  str_buf = ""
+  file = open("./conga_constants.py", 'r')
+  lines = file.readlines()
+  for line in lines:
+  str_buf = str_buf + line + "<br/>"
+  return str_buf
+
 def isClusterBusy(self, req):
   items = None
   map = {}
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/08/13 14:02:46	1.9
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/08/13 16:15:28	1.10
@@ -12,6 +12,7 @@
 NODE_CONFIG="14"
 NODE_ADD="15"
 NODE_PROCESS="16"
+NODE_LOGS="17"
 SERVICES="20"
 SERVICE_ADD="21"
 SERVICE_LIST="22"
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/08/12 17:59:40	1.15
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/08/13 16:15:28	1.16
@@ -580,6 +580,11 @@
 
     return (batch_number, result)
 
+  def getNodeLogs(self):
+    QUERY_STR = '<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name=logging'
+
+    pass
+
 def createClusterBatch(cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
     batch = '<?xml version="1.0" ?>'
     batch += '<batch>'



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 15:02 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 15:02 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 16:02:14

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fixed pagetype error

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.46&r2=1.47

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 14:57:16	1.46
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 15:02:14	1.47
@@ -1557,7 +1557,7 @@
     map['nodename'] = name
     clustername = req['clustername']
     baseurl = req['URL']
-    cfgurl = baseurl + "?" + PAGETYPE + "=" + NODE_CONFIG + "&" + CLUNAME + "=" + clustername + "&nodename=" + name
+    cfgurl = baseurl + "?" + PAGETYPE + "=" + NODE + "&" + CLUNAME + "=" + clustername + "&nodename=" + name
     map['configurl'] = cfgurl
     map['fenceurl'] = cfgurl + "#fence"
     if item['clustered'] == "true":



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 14:57 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 14:57 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 15:57:16

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	making node list dropdown work

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.45&r2=1.46

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 13:48:36	1.45
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 14:57:16	1.46
@@ -1569,6 +1569,21 @@
     else:
       map['status'] = NODE_INACTIVE
       map['status_str'] = NODE_INACTIVE_STR
+
+    #set up URLs for dropdown menu...
+    if map['status'] == NODE_ACTIVE:
+      map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
+      map['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
+      map['fence_it_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + name + "&clustername=" + clustername
+      map['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + name + "&clustername=" + clustername
+                                                                                
+    if map['status'] == NODE_INACTIVE:
+      map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
+      map['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
+      map['fence_it_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + name + "&clustername=" + clustername
+      map['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + name + "&clustername=" + clustername
+
+
     #figure out current services running on this node
     svc_dict_list = list()
     for svc in svclist:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-13 13:48 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-13 13:48 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-13 14:48:37

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 

Log message:
	Better node info look

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.44&r2=1.45
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.7&r2=1.8

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 21:13:55	1.44
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/13 13:48:36	1.45
@@ -1560,10 +1560,15 @@
     cfgurl = baseurl + "?" + PAGETYPE + "=" + NODE_CONFIG + "&" + CLUNAME + "=" + clustername + "&nodename=" + name
     map['configurl'] = cfgurl
     map['fenceurl'] = cfgurl + "#fence"
-    if item['online'] == "true":
-      map['status'] = "online"
+    if item['clustered'] == "true":
+      map['status'] = NODE_ACTIVE
+      map['status_str'] = NODE_ACTIVE_STR
+    elif item['online'] == "false":
+      map['status'] = NODE_UNKNOWN
+      map['status_str'] = NODE_UNKNOWN_STR
     else:
-      map['status'] = "offline"
+      map['status'] = NODE_INACTIVE
+      map['status_str'] = NODE_INACTIVE_STR
     #figure out current services running on this node
     svc_dict_list = list()
     for svc in svclist:
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/08/02 17:25:54	1.7
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/08/13 13:48:36	1.8
@@ -65,3 +65,6 @@
 NODE_ACTIVE="0"
 NODE_INACTIVE="1"
 NODE_UNKNOWN="2"
+NODE_ACTIVE_STR="Cluster Member"
+NODE_INACTIVE_STR="Not a Cluster Member"
+NODE_UNKNOWN_STR="Unknown State"

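The status mapping above gives 'clustered' precedence over 'online': a node reported as clustered is active, a node that is not even online is unknown, and anything else is inactive. The same decision in isolation (constants as defined in the conga_constants.py hunk; both fields arrive as string booleans in the status list):

    NODE_ACTIVE, NODE_INACTIVE, NODE_UNKNOWN = "0", "1", "2"

    def node_status(item):
        # 'clustered' wins over 'online'
        if item['clustered'] == "true":
            return NODE_ACTIVE
        if item['online'] == "false":
            return NODE_UNKNOWN
        return NODE_INACTIVE

    print node_status({'clustered': "false", 'online': "true"})   # -> "1"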


^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-12 21:13 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-12 21:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-12 22:13:55

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Various flaw fixes

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.43&r2=1.44

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 20:31:18	1.43
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 21:13:55	1.44
@@ -1319,7 +1319,7 @@
   clustername = request['clustername']
   nodename = request['nodename']
   task = request['task']
-  nodename_resolved = resolve_nodename(clustername, nodename)
+  nodename_resolved = resolve_nodename(self, clustername, nodename)
   if nodename_resolved == None:
     return None
 
@@ -1339,9 +1339,9 @@
     flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
                                                                                 
-    response = req.RESPONSE
+    response = request.RESPONSE
     #Is this correct? Should we re-direct to the cluster page?
-    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 
   elif task == NODE_JOIN_CLUSTER:
     rb = ricci_bridge(nodename_resolved)
@@ -1359,9 +1359,9 @@
     flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
                                                                                 
-    response = req.RESPONSE
+    response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
-    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 
 
   elif task == NODE_REBOOT:
@@ -1380,9 +1380,9 @@
     flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
                                                                                 
-    response = req.RESPONSE
+    response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
-    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 
 
   elif task == NODE_FENCE:
@@ -1418,9 +1418,9 @@
     flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
                                                                                 
-    response = req.RESPONSE
+    response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
-    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+    response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
 
 
   elif task == NODE_DELETE:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-12 20:31 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-12 20:31 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-12 21:31:18

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	resolve FQDNs

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.42&r2=1.43

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 18:22:29	1.42
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 20:31:18	1.43
@@ -1319,15 +1319,18 @@
   clustername = request['clustername']
   nodename = request['nodename']
   task = request['task']
+  nodename_resolved = resolve_nodename(clustername, nodename)
+  if nodename_resolved == None:
+    return None
 
   if task == NODE_LEAVE_CLUSTER:
-    rb = ricci_bridge(nodename)
+    rb = ricci_bridge(nodename_resolved)
     batch_number, result = rb.nodeLeaveCluster()
 
-    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename
+    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
     nodefolder = self.restrictedTraverse(path)
     batch_id = str(batch_number)
-    objname = nodename + "____flag"
+    objname = nodename_resolved + "____flag"
     nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #Now we need to annotate the new DB object
     objpath = path + "/" + objname
@@ -1341,13 +1344,13 @@
     response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
 
   elif task == NODE_JOIN_CLUSTER:
-    rb = ricci_bridge(nodename)
+    rb = ricci_bridge(nodename_resolved)
     batch_number, result = rb.nodeJoinCluster()
 
-    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename
+    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
     nodefolder = self.restrictedTraverse(path)
     batch_id = str(batch_number)
-    objname = nodename + "____flag"
+    objname = nodename_resolved + "____flag"
     nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #Now we need to annotate the new DB object
     objpath = path + "/" + objname
@@ -1362,13 +1365,13 @@
 
 
   elif task == NODE_REBOOT:
-    rb = ricci_bridge(nodename)
+    rb = ricci_bridge(nodename_resolved)
     batch_number, result = rb.nodeReboot()
 
-    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename
+    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename_resolved
     nodefolder = self.restrictedTraverse(path)
     batch_id = str(batch_number)
-    objname = nodename + "____flag"
+    objname = nodename_resolved + "____flag"
     nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #Now we need to annotate the new DB object
     objpath = path + "/" + objname
@@ -1390,7 +1393,7 @@
       nodes = clusterfolder.objectItems('Folder')
       found_one = False
       for node in nodes:
-        if node[1].getID() == nodename:
+        if node[1].getID().find(nodename) != (-1):
           continue
         rb = ricci_bridge(node[1].getId())
         if rb.getRicciResponse() == True:
@@ -1403,10 +1406,10 @@
 
     batch_number, result = rb.nodeFence(nodename)
 
-    path = path + "/" + nodename
+    path = path + "/" + nodename_resolved
     nodefolder = self.restrictedTraverse(path)
     batch_id = str(batch_number)
-    objname = nodename + "____flag"
+    objname = nodename_resolved + "____flag"
     nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #Now we need to annotate the new DB object
     objpath = path + "/" + objname
@@ -1428,7 +1431,7 @@
     #First, delete cluster.conf from node to be deleted.
 
     #next, have node leave cluster.
-    rb = ricci_bridge(nodename)
+    rb = ricci_bridge(nodename_resolved)
     batch_number, result = rb.nodeLeaveCluster()
 
     #It is not worth flagging this node in DB, as we are going
@@ -1446,8 +1449,9 @@
       nodes = clusterfolder.objectItems('Folder')
       found_one = False
       for node in nodes:
-        if node[1].getID() == nodename:
+        if node[1].getID().find(nodename) != (-1):
           continue
+        #here we make certain the node is up...
         rbridge = ricci_bridge(node[1].getId())
         if rbridge.getRicciResponse() == True:
           found_one = True
@@ -1461,13 +1465,13 @@
 
     #Now we need to delete the node from the DB
     path = CLUSTER_FOLDER_PATH + clustername
-    del_path = path + "/" + nodename
+    del_path = path + "/" + nodename_resolved
     delnode = self.restrictedTraverse(del_path)
     clusterfolder = self.restrictedTraverse(path)
     clusterfolder.manage_delObjects(delnode[0])
 
     batch_id = str(batch_number)
-    objname = ragent + "____flag"
+    objname = nodename_resolved + "____flag"
     clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #Now we need to annotate the new DB object
     objpath = path + "/" + objname
@@ -1906,4 +1910,12 @@
     pass
   return
 
+def resolve_nodename(self, clustername, nodename):
+  path = CLUSTER_FOLDER_PATH + clustername
+  clusterfolder = self.restrictedTraverse(path)
+  objs = clusterfolder.objectItems()
+  for obj in objs:
+    if obj[0].find(nodename) != (-1):
+      return obj[0]
 
+  return None

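resolve_nodename above does substring matching against the stored folder ids, which is what lets a short hostname resolve to the FQDN under which the node was added. The lookup logic in isolation (sample ids are hypothetical):

    def resolve(stored_ids, nodename):
        # the first stored id containing nodename wins, as in resolve_nodename
        for name in stored_ids:
            if name.find(nodename) != -1:
                return name
        return None

    ids = ['node1.example.com', 'node2.example.com']
    print resolve(ids, 'node2')   # -> 'node2.example.com'

Note that a bare prefix shared by several nodes resolves to whichever folder is enumerated first, so unambiguous node names are assumed.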


^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-12 18:22 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-12 18:22 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-12 19:22:29

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	bug fix in node info

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.41&r2=1.42

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 17:53:09	1.41
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 18:22:29	1.42
@@ -1519,6 +1519,7 @@
     infohash['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
                                                                                 
   #return infohash
+  infohash['d_states'] = None
   if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
   #call service module on node and find out which daemons are running
     rb = ricci_bridge(nodename)



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-12 17:53 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-12 17:53 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-12 18:53:10

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Support for node actions

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.40&r2=1.41

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/11 00:29:11	1.40
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/12 17:53:09	1.41
@@ -1315,6 +1315,169 @@
 
   return map
 
+def nodeTaskProcess(self, model, request):
+  clustername = request['clustername']
+  nodename = request['nodename']
+  task = request['task']
+
+  if task == NODE_LEAVE_CLUSTER:
+    rb = ricci_bridge(nodename)
+    batch_number, result = rb.nodeLeaveCluster()
+
+    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename
+    nodefolder = self.restrictedTraverse(path)
+    batch_id = str(batch_number)
+    objname = nodename + "____flag"
+    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+    #Now we need to annotate the new DB object
+    objpath = path + "/" + objname
+    flag = self.restrictedTraverse(objpath)
+    flag.manage_addProperty(BATCH_ID,batch_id, "string")
+    flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
+    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
+                                                                                
+    response = req.RESPONSE
+    #Is this correct? Should we re-direct to the cluster page?
+    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+
+  elif task == NODE_JOIN_CLUSTER:
+    rb = ricci_bridge(nodename)
+    batch_number, result = rb.nodeJoinCluster()
+
+    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename
+    nodefolder = self.restrictedTraverse(path)
+    batch_id = str(batch_number)
+    objname = nodename + "____flag"
+    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+    #Now we need to annotate the new DB object
+    objpath = path + "/" + objname
+    flag = self.restrictedTraverse(objpath)
+    flag.manage_addProperty(BATCH_ID,batch_id, "string")
+    flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
+    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
+                                                                                
+    response = req.RESPONSE
+    #Once again, is this correct? Should we re-direct to the cluster page?
+    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+
+
+  elif task == NODE_REBOOT:
+    rb = ricci_bridge(nodename)
+    batch_number, result = rb.nodeReboot()
+
+    path = CLUSTER_FOLDER_PATH + clustername + "/" + nodename
+    nodefolder = self.restrictedTraverse(path)
+    batch_id = str(batch_number)
+    objname = nodename + "____flag"
+    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+    #Now we need to annotate the new DB object
+    objpath = path + "/" + objname
+    flag = self.restrictedTraverse(objpath)
+    flag.manage_addProperty(BATCH_ID,batch_id, "string")
+    flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
+    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
+                                                                                
+    response = req.RESPONSE
+    #Once again, is this correct? Should we re-direct to the cluster page?
+    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+
+
+  elif task == NODE_FENCE:
+    #here, we DON'T want to open connection to node to be fenced.
+    path = CLUSTER_FOLDER_PATH + clustername
+    clusterfolder = self.restrictedTraverse(path)
+    if clusterfolder != None:
+      nodes = clusterfolder.objectItems('Folder')
+      found_one = False
+      for node in nodes:
+        if node[1].getID() == nodename:
+          continue
+        rb = ricci_bridge(node[1].getId())
+        if rb.getRicciResponse() == True:
+          found_one = True
+          break
+      if found_one == False:
+        return None
+    else:
+      return None
+
+    batch_number, result = rb.nodeFence(nodename)
+
+    path = path + "/" + nodename
+    nodefolder = self.restrictedTraverse(path)
+    batch_id = str(batch_number)
+    objname = nodename + "____flag"
+    nodefolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+    #Now we need to annotate the new DB object
+    objpath = path + "/" + objname
+    flag = self.restrictedTraverse(objpath)
+    flag.manage_addProperty(BATCH_ID,batch_id, "string")
+    flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
+    flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
+                                                                                
+    response = req.RESPONSE
+    #Once again, is this correct? Should we re-direct to the cluster page?
+    response.redirect(req['URL'] + "?pagetype=" + CLUSTER_CONFIG)
+
+
+  elif task == NODE_DELETE:
+    #We need to get a node name other than the node
+    #to be deleted, then delete the node from the cluster.conf
+    #and propagate it. We will need two ricci agents for this task.
+    
+    #First, delete cluster.conf from node to be deleted.
+
+    #next, have node leave cluster.
+    rb = ricci_bridge(nodename)
+    batch_number, result = rb.nodeLeaveCluster()
+
+    #It is not worth flagging this node in DB, as we are going
+    #to delete it anyway. Now, we need to delete node from model
+    #and send out new cluster.conf
+
+    model.deleteNode(nodename)
+    str_buf = ""
+    model.exportModelAsString(str_buf)
+
+    #here, we DON'T want to open connection to node to be fenced.
+    path = CLUSTER_FOLDER_PATH + clustername
+    clusterfolder = self.restrictedTraverse(path)
+    if clusterfolder != None:
+      nodes = clusterfolder.objectItems('Folder')
+      found_one = False
+      for node in nodes:
+        if node[1].getID() == nodename:
+          continue
+        rbridge = ricci_bridge(node[1].getId())
+        if rbridge.getRicciResponse() == True:
+          found_one = True
+          break
+      if found_one == False:
+        return None
+    else:
+      return None
+
+    batch_number, result = rbridge.setClusterConf(str(str_buf))
+
+    #Now we need to delete the node from the DB
+    path = CLUSTER_FOLDER_PATH + clustername
+    del_path = path + "/" + nodename
+    delnode = self.restrictedTraverse(del_path)
+    clusterfolder = self.restrictedTraverse(path)
+    clusterfolder.manage_delObjects(delnode[0])
+
+    batch_id = str(batch_number)
+    objname = ragent + "____flag"
+    clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+    #Now we need to annotate the new DB object
+    objpath = path + "/" + objname
+    flag = self.restrictedTraverse(objpath)
+    flag.manage_addProperty(BATCH_ID,batch_id, "string")
+    flag.manage_addProperty(TASKTYPE,NODE_DELETE, "string")
+    flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
+    response = request.RESPONSE
+    response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
+  
 
 def getNodeInfo(self, model, status, request):
   infohash = {}
@@ -1344,16 +1507,16 @@
                                                                                 
   #set up drop down links
   if nodestate == NODE_ACTIVE:
-    infohash['jl_url'] = baseurl + "?pagetype=" + NODE_LEAVE_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
-    infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
-    infohash['fence_url'] = baseurl + "?pagetype=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
-    infohash['delete_url'] = baseurl + "?pagetype=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['fence_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
                                                                                 
   if nodestate == NODE_INACTIVE:
-    infohash['jl_url'] = baseurl + "?pagetype=" + NODE_JOIN_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
-    infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
-    infohash['fence_url'] = baseurl + "?pagetype=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
-    infohash['delete_url'] = baseurl + "?pagetype=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['fence_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
+    infohash['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
                                                                                 
   #return infohash
   if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:



^ permalink raw reply	[flat|nested] 185+ messages in thread
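
Editorial note: the NODE_DELETE branch above has to propagate the new
cluster.conf through a ricci agent other than the node being removed, so it
probes the remaining cluster folder entries for the first live agent. Two
details worth flagging: the committed code mixes getID() and getId() on the
node objects, and str_buf is passed by value to exportModelAsString() even
though Python strings are immutable (later revisions further down use the
return value instead). A minimal sketch of the live-agent lookup, assuming
getId() and the ricci_bridge/getRicciResponse interfaces from the diff
(find_live_agent is a hypothetical helper name):

    def find_live_agent(nodes, skip_name):
        # Probe every node folder entry except the node being deleted and
        # return a bridge to the first ricci agent that responds.
        for node in nodes:
            if node[1].getId() == skip_name:
                continue
            bridge = ricci_bridge(node[1].getId())
            if bridge.getRicciResponse():
                return bridge
        return None  # no reachable agent; the caller aborts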

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-11  0:29 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-11  0:29 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-11 00:29:11

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	hash val for svc run state

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.39&r2=1.40

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/10 23:06:42	1.39
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/11 00:29:11	1.40
@@ -1290,6 +1290,7 @@
       svc_dict['nodename'] = svc['nodename']
       svcname = svc['name']
       svc_dict['name'] = svcname
+      svc_dict['srunning'] = svc['running']
       svcurl = baseurl + "?" + PAGETYPE + "=" + SERVICE + "&" + CLUNAME + "=" + clustername + "&servicename=" + svcname
       svc_dict['servicename'] = svcname
       svc_dict['svcurl'] = svcurl



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-10 23:06 shuennek
  0 siblings, 0 replies; 185+ messages in thread
From: shuennek @ 2006-08-10 23:06 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	shuennek at sourceware.org	2006-08-10 23:06:43

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.38&r2=1.39

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/10 16:50:40	1.38
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/10 23:06:42	1.39
@@ -979,7 +979,7 @@
   rc_map = {}
   if parent != None:
     rc_map['parent'] = parent
-  rc_map['resource_name'] = child.getName()
+  rc_map['name'] = child.getName()
   if child.isRefObject() == True:
     rc_map['ref_object'] = True
     rc_map['type'] = child.getObj().getResourceType()
@@ -987,13 +987,18 @@
     rc_map['type'] = child.getResourceType()
 
   rc_map['indent_ctr'] = indent_ctr
+  indent_str = '_'
+  i = indent_ctr
+  while i>0:
+    indent_str += '__'
+    i -= 1
+  rc_map['indent_str'] = indent_str
+    
   #Note: Final version needs all resource attrs
-  attrs = child.getAttributes()
-  attr_keys = attrs.keys()
-  for key in attr_keys:
-    rc_map[key] = attrs[key]
+  rc_map['attrs'] = child.getAttributes()
 
   new_indent_ctr = indent_ctr + 1
+
   resource_list.append(rc_map)
   kids = child.getChildren()
   for kid in kids:



^ permalink raw reply	[flat|nested] 185+ messages in thread
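
Editorial note: the indent_str loop above produces '_' followed by
indent_ctr copies of '__'. Python's string repetition expresses the same
thing in one line; a behavior-equivalent sketch (make_indent_str is a
hypothetical name):

    def make_indent_str(indent_ctr):
        # one leading underscore plus two per nesting level, as in the loop
        return '_' + '__' * indent_ctr

    assert make_indent_str(2) == '_____'  # same output as the committed loop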

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-10 16:50 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-10 16:50 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-10 16:50:41

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Added parent hash field for form urls

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.37&r2=1.38

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/10 14:16:43	1.37
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/10 16:50:40	1.38
@@ -971,12 +971,14 @@
 
   return map
 
-def recurse_resources(child, resource_list, indent_ctr):
+def recurse_resources(child, resource_list, indent_ctr, parent=None):
   #First, add the incoming child as a resource
  #Next, check it for children
  #Call yourself on each child
   #then return
   rc_map = {}
+  if parent != None:
+    rc_map['parent'] = parent
   rc_map['resource_name'] = child.getName()
   if child.isRefObject() == True:
     rc_map['ref_object'] = True
@@ -995,7 +997,7 @@
   resource_list.append(rc_map)
   kids = child.getChildren()
   for kid in kids:
-    recurse_resources(kid, resource_list, new_indent_ctr)
+    recurse_resources(kid, resource_list, new_indent_ctr, child)
 
   return
     



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-10 14:16 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-10 14:16 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-10 14:16:43

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Added name field to resource hash

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.36&r2=1.37

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/09 22:05:42	1.36
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/10 14:16:43	1.37
@@ -977,6 +977,7 @@
  #Call yourself on each child
   #then return
   rc_map = {}
+  rc_map['resource_name'] = child.getName()
   if child.isRefObject() == True:
     rc_map['ref_object'] = True
     rc_map['type'] = child.getObj().getResourceType()



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-09 22:05 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-09 22:05 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-09 22:05:42

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fixed ref object type problem

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.35&r2=1.36

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/09 21:48:55	1.35
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/09 22:05:42	1.36
@@ -979,8 +979,10 @@
   rc_map = {}
   if child.isRefObject() == True:
     rc_map['ref_object'] = True
+    rc_map['type'] = child.getObj().getResourceType()
+  else:
+    rc_map['type'] = child.getResourceType()
 
-  rc_map['type'] = child.getResourceType()
   rc_map['indent_ctr'] = indent_ctr
   #Note: Final version needs all resource attrs
   attrs = child.getAttributes()



^ permalink raw reply	[flat|nested] 185+ messages in thread
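
Editorial note: the fix above makes the type lookup dispatch through the
referenced target when the child is a reference object, instead of always
calling getResourceType() on the child itself. The dispatch, isolated as a
sketch (resource_type is a hypothetical helper; the method names are those
used in the diff):

    def resource_type(child):
        # Reference objects report the type of the object they point at.
        if child.isRefObject():
            return child.getObj().getResourceType()
        return child.getResourceType()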

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-09 21:48 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-09 21:48 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-09 21:48:55

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	finish service info hash

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.34&r2=1.35

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/03 18:36:21	1.34
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/09 21:48:55	1.35
@@ -955,8 +955,47 @@
             starturls.append(starturl)
           innermap['links'] = starturls
         map['innermap'] = innermap
+
+  #Now build hashes for resources under service.
+  #first get service by name from model
+  svc = modelb.getService(servicename)
+  resource_list = list()
+  if svc != None:
+    indent_ctr = 0
+    children = svc.getChildren()
+    
+    for child in children:
+      recurse_resources(child, resource_list, indent_ctr)
+      
+  map['resource_list'] = resource_list 
+
   return map
 
+def recurse_resources(child, resource_list, indent_ctr):
+  #First, add the incoming child as a resource
+  #Next, check it for children
+  #Call yourself on each child
+  #then return
+  rc_map = {}
+  if child.isRefObject() == True:
+    rc_map['ref_object'] = True
+
+  rc_map['type'] = child.getResourceType()
+  rc_map['indent_ctr'] = indent_ctr
+  #Note: Final version needs all resource attrs
+  attrs = child.getAttributes()
+  attr_keys = attrs.keys()
+  for key in attr_keys:
+    rc_map[key] = attrs[key]
+
+  new_indent_ctr = indent_ctr + 1
+  resource_list.append(rc_map)
+  kids = child.getChildren()
+  for kid in kids:
+    recurse_resources(kid, resource_list, new_indent_ctr)
+
+  return
+    
 def serviceStart(self, ricci_agent, req):
   rb = ricci_bridge(ricci_agent)
   svcname = req['servicename']



^ permalink raw reply	[flat|nested] 185+ messages in thread
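
Editorial note: recurse_resources above flattens the service's resource
tree into a flat list of hashes in pre-order, carrying a depth counter so
the template can indent each row. A self-contained sketch of the same
traversal (Node is a hypothetical stand-in for the model objects; the real
code reads type and attributes through the model API shown in the diff):

    class Node:
        def __init__(self, rtype, children=None):
            self.rtype = rtype
            self.children = children or []

    def flatten(node, out, depth=0):
        # pre-order: record the node, then descend one level deeper
        out.append({'type': node.rtype, 'indent_ctr': depth})
        for kid in node.children:
            flatten(kid, out, depth + 1)

    resources = []
    flatten(Node('service', [Node('ip'), Node('fs', [Node('nfsexport')])]),
            resources)
    # yields: service@0, ip@1, fs@1, nfsexport@2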

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-03 13:37 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-03 13:37 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-03 13:37:39

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fixed desc bug

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.32&r2=1.33

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/02 18:59:04	1.32
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/03 13:37:39	1.33
@@ -1395,6 +1395,7 @@
     if finished == True:
       clusterfolder.manage_delObjects(item[0])
       map['refreshurl'] = '5; url=\".\"'
+      map['desc'] = item[1].getProperty(FLAG_DESC)
       return map
     else:
       map['busy'] = "true"



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-02 18:59 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-08-02 18:59 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-08-02 18:59:04

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	use full paths in tab links

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.31&r2=1.32

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/02 17:25:54	1.31
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/02 18:59:04	1.32
@@ -767,7 +767,7 @@
 
   htab = { 'Title':"homebase",
            'Description':"Home base for this luci server", 
-           'Taburl':"../homebase"}
+           'Taburl':"/luci/homebase"}
   if selectedtab == "homebase":
     htab['isSelected'] = True
   else:
@@ -776,7 +776,7 @@
 
   ctab = { 'Title':"cluster",
            'Description':"Cluster configuration page", 
-           'Taburl':"../cluster?pagetype=3"}
+           'Taburl':"/luci/cluster?pagetype=3"}
   if selectedtab == "cluster":
     ctab['isSelected'] = True
   else:
@@ -784,7 +784,7 @@
 
   stab = { 'Title':"storage",
            'Description':"Storage configuration page", 
-           'Taburl':"../storage"}
+           'Taburl':"/luci/storage"}
   if selectedtab == "storage":
     stab['isSelected'] = True
   else:



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-02 17:25 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-08-02 17:25 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-08-02 17:25:54

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 

Log message:
	committing for stephen

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.30&r2=1.31
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.6&r2=1.7

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:29:37	1.30
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/02 17:25:54	1.31
@@ -1394,6 +1394,7 @@
     finished = rb.checkBatch(item[1].getProperty(BATCH_ID)) 
     if finished == True:
       clusterfolder.manage_delObjects(item[0])
+      map['refreshurl'] = '5; url=\".\"'
       return map
     else:
       map['busy'] = "true"
@@ -1423,6 +1424,7 @@
     itemmap['type'] = item.resource_type
     itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_CONFIG
     itemmap['url'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE
+    itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_REMOVE
     resList.append(itemmap)
   return resList
                                                                                 
@@ -1438,9 +1440,48 @@
           resMap['tag_name'] = res.TAG_NAME
           resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
           return resMap
-                                                                                
+
   return {}
-                                                                                
+
+
+def delResource(self, request, ragent):
+  modelb = request.SESSION.get('model')
+  resPtr = modelb.getResourcesPtr()
+  resources = resPtr.getChildren()
+  name = request['resourcename']                                                                             
+  for res in resources:
+    if res.getName() == name:
+      resPtr.removeChild(res)
+      break
+
+  modelstr = ""
+  conf = modelb.exportModelAsString()
+  rb = ricci_bridge(ragent)
+  #try:
+  if True:
+    batch_number, result = rb.setClusterConf(str(conf))
+  #except:
+  else:
+    return "Some error occured in setClusterConf\n"
+
+  clustername = request['clustername']
+  path = CLUSTER_FOLDER_PATH + clustername
+  clusterfolder = self.restrictedTraverse(path)
+  batch_id = str(batch_number)
+  objname = ragent + "____flag"
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+   #Now we need to annotate the new DB object
+  objpath = path + "/" + objname
+  flag = self.restrictedTraverse(objpath)
+  flag.manage_addProperty(BATCH_ID,batch_id, "string")
+  flag.manage_addProperty(TASKTYPE,RESOURCE_REMOVE, "string")
+  flag.manage_addProperty(FLAG_DESC,"Removing Resource \'" + request['resourcename'] + "\'", "string")
+
+  response = request.RESPONSE
+  response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
+
+
+  
 def addResource(self, request, ragent):
   if not request.form:
     return "Nothing submitted, no changes made."
@@ -1483,16 +1524,17 @@
   flag.manage_addProperty(TASKTYPE,RESOURCE_ADD, "string")
   flag.manage_addProperty(FLAG_DESC,"Creating Resource \'" + request.form['resourceName'] + "\'", "string")
 
-  response = req.RESPONSE
+  response = request.RESPONSE
   response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
 
 def getResourceForEdit(modelb, name):
-  resources = modelb.getResourcesPtr().getChildren()
+  resPtr = modelb.getResourcesPtr()
+  resources = resPtr.getChildren()
                                                                                
   for res in resources:
     if res.getName() == name:
-      resources.removeChild(res)
+      resPtr.removeChild(res)
       break
                                                                               
   return res
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/07/28 14:02:45	1.6
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/08/02 17:25:54	1.7
@@ -27,6 +27,7 @@
 RESOURCE_CONFIG="33"
 RESOURCE="34"
 RESOURCE_PROCESS="35"
+RESOURCE_REMOVE="36"
 FDOMS="40"
 FDOM_ADD="41"
 FDOM_LIST="42"



^ permalink raw reply	[flat|nested] 185+ messages in thread
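
Editorial note: the '#try: / if True: / #except: / else:' scaffolding in
delResource and addResource above is a temporarily disabled exception
handler. The shape the comments imply, once re-enabled, is simply this
sketch (setClusterConf is the ricci_bridge call used in the diff):

    try:
        batch_number, result = rb.setClusterConf(str(conf))
    except:
        # the committed code returns this error string to the caller
        return "Some error occurred in setClusterConf\n"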

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-01 15:29 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-01 15:29 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-01 15:29:38

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	another typo fix

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.29&r2=1.30

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:25:15	1.29
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:29:37	1.30
@@ -1484,7 +1484,7 @@
   flag.manage_addProperty(FLAG_DESC,"Creating Resource \'" + request.form['resourceName'] + "\'", "string")
 
   response = req.RESPONSE
-  response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+  response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
 
 def getResourceForEdit(modelb, name):



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-01 15:25 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-01 15:25 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-01 15:25:15

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	typo fix

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.28&r2=1.29

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:20:05	1.28
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:25:15	1.29
@@ -1481,7 +1481,7 @@
   flag = self.restrictedTraverse(objpath)
   flag.manage_addProperty(BATCH_ID,batch_id, "string")
   flag.manage_addProperty(TASKTYPE,RESOURCE_ADD, "string")
-  flag.manage_addProperty(FLAG_DESC,"Creating Resource \'" + request.form['resourcename'] + "\'", "string")
+  flag.manage_addProperty(FLAG_DESC,"Creating Resource \'" + request.form['resourceName'] + "\'", "string")
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-01 15:20 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-01 15:20 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-01 15:20:05

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Yikes. another nit.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.27&r2=1.28

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:13:23	1.27
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:20:05	1.28
@@ -1474,7 +1474,7 @@
   path = CLUSTER_FOLDER_PATH + clustername
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
-  objname = ricci_agent + "____flag"
+  objname = ragent + "____flag"
   clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
    #Now we need to annotate the new DB object
   objpath = path + "/" + objname



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-01 15:13 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-01 15:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-01 15:13:23

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Minor nit re batch_id

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.26&r2=1.27

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:04:09	1.26
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:13:23	1.27
@@ -1465,7 +1465,7 @@
   rb = ricci_bridge(ragent)
   #try:
   if True:
-    batch_id, result = rb.setClusterConf(str(conf))
+    batch_number, result = rb.setClusterConf(str(conf))
   #except:
   else:
     return "Some error occured in setClusterConf\n"



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-08-01 15:04 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-08-01 15:04 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-08-01 15:04:09

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	busy-wait for adding a resource

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.25&r2=1.26

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/31 18:21:52	1.25
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/08/01 15:04:09	1.26
@@ -1455,7 +1455,8 @@
            'nfsc': addNfsx,
            'scr': addScr,
            'smb': addSmb}
-                                                                                
+  
+  
   type = request.form["type"]
   res = types[type](request)
   modelb = request.SESSION.get('model')
@@ -1469,7 +1470,22 @@
   else:
     return "Some error occured in setClusterConf\n"
 
-  return "Resource Added"
+  clustername = request['clustername']
+  path = CLUSTER_FOLDER_PATH + clustername
+  clusterfolder = self.restrictedTraverse(path)
+  batch_id = str(batch_number)
+  objname = ricci_agent + "____flag"
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+   #Now we need to annotate the new DB object
+  objpath = path + "/" + objname
+  flag = self.restrictedTraverse(objpath)
+  flag.manage_addProperty(BATCH_ID,batch_id, "string")
+  flag.manage_addProperty(TASKTYPE,RESOURCE_ADD, "string")
+  flag.manage_addProperty(FLAG_DESC,"Creating Resource \'" + request.form['resourcename'] + "\'", "string")
+
+  response = req.RESPONSE
+  response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+
 
 def getResourceForEdit(modelb, name):
   resources = modelb.getResourcesPtr().getChildren()



^ permalink raw reply	[flat|nested] 185+ messages in thread
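
Editorial note: this commit completes the busy-wait cycle for resource
creation: kick off the asynchronous ricci batch, record a flag object
carrying the batch id, then redirect back with busyfirst=true so the
landing page polls checkBatch until the flag is deleted. The lifecycle in
outline (a sketch assembled from the calls in the diff):

    # 1. submit the async batch and remember its id
    batch_number, result = rb.setClusterConf(str(conf))

    # 2. persist a flag the status handler can find later
    flag.manage_addProperty(BATCH_ID, str(batch_number), "string")

    # 3. bounce the browser; '&busyfirst=true' triggers polling
    response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")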

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-31 18:21 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-07-31 18:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-31 18:21:52

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix call to buildClusterCreateFlags and send cluster name to validation routines

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.24&r2=1.25

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/31 17:46:32	1.24
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/31 18:21:52	1.25
@@ -25,7 +25,7 @@
 
 CLUSTER_FOLDER_PATH = '/luci/systems/cluster/'
 
-def validateClusterNodes(request, sessionData, numStorage):
+def validateClusterNodes(request, sessionData, clusterName, numStorage):
 	nodeList = list()
 	nodeHash = {}
 	rnodeHash = {}
@@ -118,7 +118,7 @@
 	if numStorage < 2:
 		return (False, { 'errors': ['A cluster must contain at least two nodes'], 'requestResults': requestResults })
 
-	ret = validateClusterNodes(request, sessionData, numStorage)
+	ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
 	errors.extend(ret[0])
 	cluster_properties = ret[1]
 
@@ -154,14 +154,14 @@
 			try:
 				rc = RicciCommunicator(i['ricci_host'])
 				resultNode = rc.process_batch(batchNode, async=True)
-                                batch_id_map[i['ricci_host']] = resultNode.getAttribute('batch_id')
+				batch_id_map[i['ricci_host']] = resultNode.getAttribute('batch_id')
 			except:
 				nodeUnauth(nodeList)
 				cluster_properties['isComplete'] = False
 				errors.append('An error occurred while attempting to add cluster node \"' + i['ricci_host'] + '\"')
 				return (False, {'errors': errors, 'requestResults':cluster_properties })
 
-                buildClusterCreateFlags(batch_id_map, clusterName)
+		buildClusterCreateFlags(self, batch_id_map, clusterName)
 
 	messages.append('Creation of cluster \"' + clusterName + '\" has begun')
 	return (True, {'errors': errors, 'messages': messages })
@@ -204,7 +204,7 @@
 		errors.append('You must specify at least one node to add to the cluster')
 		return (False, {'errors': [ errors ] })
 
-	ret = validateClusterNodes(sessionData, request, numStorage)
+	ret = validateClusterNodes(sessionData, request, clusterName, numStorage)
 	errors.extend(ret[0])
 	cluster_properties = ret[1]
 
@@ -849,6 +849,10 @@
   rb = ricci_bridge(ricci_name)
   doc = rb.getClusterStatus()
   results = list()
+
+  if not doc or not doc.firstChild:
+    return {}
+
   vals = {}
   vals['type'] = "cluster"
   try:
@@ -1213,7 +1217,8 @@
       clulist.append(item)
     else:
       continue
-
+  if len(clulist) < 1:
+    return {}
   clu = clulist[0]
   clustername = clu['name']
   if clu['alias'] != "":



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-28 19:03 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-28 19:03 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-28 19:03:05

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fixed minor nit

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.22&r2=1.23

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 18:57:36	1.22
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 19:03:05	1.23
@@ -920,7 +920,7 @@
   #flag[FLAG_DESC] = "Starting service " + svcname
   flag.manage_addProperty(BATCH_ID,batch_id, "string")
   flag.manage_addProperty(TASKTYPE,SERVICE_START, "string")
-  flag.manage_addProperty(FLAG_DESC,"Starting service " + svcname, "string")
+  flag.manage_addProperty(FLAG_DESC,"Starting service \'" + svcname + "\'", "string")
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-28 18:57 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-28 18:57 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-28 18:57:36

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Added description field to busywait map

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.21&r2=1.22

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 18:40:23	1.21
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 18:57:36	1.22
@@ -1337,13 +1337,14 @@
       part2 = req['QUERY_STRING']
       dex = part2.find("&busyfirst")
       if dex != (-1):
-        tmpstr = part2[:dex]
+        tmpstr = part2[:dex] #This strips off busyfirst var
         part2 = tmpstr
         ###FIXME - The above assumes that the 'busyfirst' query var is at the 
         ###end of the URL...
       wholeurl = part1 + "?" + part2
       #map['url'] = "5, url=" + req['ACTUAL_URL'] + "?" + req['QUERY_STRING']
       map['refreshurl'] = "5; url=" + wholeurl
+      map['desc'] = item[1].getProperty(FLAG_DESC)
       req['specialpagetype'] = "1"
       return map
   return map



^ permalink raw reply	[flat|nested] 185+ messages in thread
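
Editorial note: the code above strips the busyfirst variable by slicing
QUERY_STRING at its first occurrence, and the FIXME notes this assumes the
variable sits at the end of the URL. A position-independent alternative
using the Python 2 standard library, offered as a sketch only
(strip_busyfirst is a hypothetical helper; note it does not preserve
parameter order):

    import cgi, urllib

    def strip_busyfirst(query_string):
        # parse into {key: [values]}, drop busyfirst, re-encode the rest
        params = cgi.parse_qs(query_string)
        params.pop('busyfirst', None)
        return urllib.urlencode(
            [(k, v) for k, vals in params.items() for v in vals])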

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-28 18:40 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-28 18:40 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-28 18:40:23

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fixes for database flags

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.20&r2=1.21

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 14:16:30	1.20
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 18:40:23	1.21
@@ -159,9 +159,12 @@
     #now designate this new object properly
     objpath = path + "/" + objname
     flag = self.restrictedTraverse(objpath)
-    flag[BATCH_ID] = batch_id
-    flag[TASKTYPE] = CLUSTER_ADD
-    flag[FLAG_DESC] = "Creating node " + key + " for cluster " + clusterName
+    #flag[BATCH_ID] = batch_id
+    #flag[TASKTYPE] = CLUSTER_ADD
+    #flag[FLAG_DESC] = "Creating node " + key + " for cluster " + clusterName
+    flag.manage_addProperty(BATCH_ID,batch_id, "string")
+    flag.manage_addProperty(TASKTYPE,CLUSTER_ADD, "string")
+    flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
   
 
 def validateAddClusterNode(self, request):
@@ -912,9 +915,12 @@
   #Now we need to annotate the new DB object
   objpath = path + "/" + objname
   flag = self.restrictedTraverse(objpath)
-  flag[BATCH_ID] = batch_id
-  flag[TASKTYPE] = SERVICE_START
-  flag[FLAG_DESC] = "Starting service " + svcname
+  #flag[BATCH_ID] = batch_id
+  #flag[TASKTYPE] = SERVICE_START
+  #flag[FLAG_DESC] = "Starting service " + svcname
+  flag.manage_addProperty(BATCH_ID,batch_id, "string")
+  flag.manage_addProperty(TASKTYPE,SERVICE_START, "string")
+  flag.manage_addProperty(FLAG_DESC,"Starting service " + svcname, "string")
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
@@ -936,9 +942,12 @@
   #Now we need to annotate the new DB object
   objpath = path + "/" + objname
   flag = self.restrictedTraverse(objpath)
-  flag[BATCH_ID] = batch_id
-  flag[TASKTYPE] = SERVICE_RESTART
-  flag[FLAG_DESC] = "Restarting service " + svcname 
+  #flag[BATCH_ID] = batch_id
+  #flag[TASKTYPE] = SERVICE_RESTART
+  #flag[FLAG_DESC] = "Restarting service " + svcname 
+  flag.manage_addProperty(BATCH_ID,batch_id, "string")
+  flag.manage_addProperty(TASKTYPE,SERVICE_RESTART, "string")
+  flag.manage_addProperty(FLAG_DESC,"Restarting service " + svcname, "string")
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
@@ -959,9 +968,12 @@
   #Now we need to annotate the new DB object
   objpath = path + "/" + objname
   flag = self.restrictedTraverse(objpath)
-  flag[BATCH_ID] = batch_id
-  flag[TASKTYPE] = SERVICE_STOP
-  flag[FLAG_DESC] = "Stopping service " + svcname
+  #flag[BATCH_ID] = batch_id
+  #flag[TASKTYPE] = SERVICE_STOP
+  #flag[FLAG_DESC] = "Stopping service " + svcname
+  flag.manage_addProperty(BATCH_ID,batch_id,"string")
+  flag.manage_addProperty(TASKTYPE,SERVICE_STOP, "string")
+  flag.manage_addProperty(FLAG_DESC,"Stopping service " + svcname,"string")
 
   time.sleep(2)
 
@@ -1315,7 +1327,7 @@
     #Check here for more than 1 entry (an error)
     ricci = item[0].split("____") #This removes the 'flag' suffix
     rb = ricci_bridge(ricci[0])
-    finished = rb.checkBatch(item[1][BATCH_ID]) 
+    finished = rb.checkBatch(item[1].getProperty(BATCH_ID)) 
     if finished == True:
       clusterfolder.manage_delObjects(item[0])
       return map



^ permalink raw reply	[flat|nested] 185+ messages in thread
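
Editorial note: the change above replaces item-style assignment on the flag
object (flag[BATCH_ID] = ...) with Zope's manage_addProperty, which both
declares and sets a typed property on the persistent object; the polling
side then reads it back with getProperty, as the checkBatch hunk shows. The
write/read pairing, condensed as a sketch with names taken from the diff:

    # write side: annotate the freshly created flag object
    flag.manage_addProperty(BATCH_ID, batch_id, "string")
    flag.manage_addProperty(TASKTYPE, SERVICE_START, "string")

    # read side: the busy-wait loop fetches the stored batch id
    finished = rb.checkBatch(item[1].getProperty(BATCH_ID))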

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-28 14:16 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-28 14:16 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-28 14:16:30

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Had to add a suffix for flag IDs

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.19&r2=1.20

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 14:02:45	1.19
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 14:16:30	1.20
@@ -154,9 +154,10 @@
   for key in batch_map.keys():
     id = batch_map[key]
     batch_id = str(id)
-    clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(key)
+    objname = key + "____flag" #This suffix needed to avoid name collision
+    clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
     #now designate this new object properly
-    objpath = path + "/" + key
+    objpath = path + "/" + objname
     flag = self.restrictedTraverse(objpath)
     flag[BATCH_ID] = batch_id
     flag[TASKTYPE] = CLUSTER_ADD
@@ -906,9 +907,10 @@
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent)
+  objname = ricci_agent + "____flag"
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
   #Now we need to annotate the new DB object
-  objpath = path + "/" + ricci_agent
+  objpath = path + "/" + objname
   flag = self.restrictedTraverse(objpath)
   flag[BATCH_ID] = batch_id
   flag[TASKTYPE] = SERVICE_START
@@ -929,9 +931,10 @@
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent)
+  objname = ricci_agent + "____flag"
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
   #Now we need to annotate the new DB object
-  objpath = path + "/" + ricci_agent
+  objpath = path + "/" + objname
   flag = self.restrictedTraverse(objpath)
   flag[BATCH_ID] = batch_id
   flag[TASKTYPE] = SERVICE_RESTART
@@ -951,9 +954,10 @@
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent)
+  objname = ricci_agent + "____flag"
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
   #Now we need to annotate the new DB object
-  objpath = path + "/" + ricci_agent
+  objpath = path + "/" + objname
   flag = self.restrictedTraverse(objpath)
   flag[BATCH_ID] = batch_id
   flag[TASKTYPE] = SERVICE_STOP
@@ -1309,7 +1313,8 @@
   ##3) The ricci agent has no recollection of the task, so handle like 1 above
   for item in items:
     #Check here for more than 1 entry (an error)
-    rb = ricci_bridge(item[0])
+    ricci = item[0].split("____") #This removes the 'flag' suffix
+    rb = ricci_bridge(ricci[0])
     finished = rb.checkBatch(item[1][BATCH_ID]) 
     if finished == True:
       clusterfolder.manage_delObjects(item[0])



^ permalink raw reply	[flat|nested] 185+ messages in thread
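
Editorial note: the flag object shares the cluster folder with a node
folder named after the same ricci agent, so the commit above appends a
"____flag" suffix to keep the DB ids distinct, and the busy-wait loop
recovers the hostname by splitting on "____". An encode/decode sketch
(hypothetical helper names; this round-trips as long as hostnames never
contain "____"):

    FLAG_SUFFIX = "____flag"

    def flag_objname(agent):
        return agent + FLAG_SUFFIX

    def agent_from_flag(objname):
        # the committed code splits on "____", the separator in the suffix
        return objname.split("____")[0]

    assert agent_from_flag(flag_objname("node1.example.com")) == "node1.example.com"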

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-28 14:02 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-28 14:02 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-28 14:02:45

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           conga_constants.py 

Log message:
	Refactored how DB flags are created for service changes, and also added DB flags for cluster creation.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.18&r2=1.19
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.5&r2=1.6

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 11:46:35	1.18
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 14:02:45	1.19
@@ -131,20 +131,38 @@
 			cluster_properties['isComplete'] = False
 			errors.append(error)
 			return (False, {'errors': errors, 'requestResults':cluster_properties })
-
+                batch_id_map = {}
 		for i in nodeList:
 			try:
 				rc = RicciCommunicator(i['ricci_host'])
 				resultNode = rc.process_batch(batchNode, async=True)
+                                batch_id_map[i['ricci_host']] = resultNode.getAttribute('batch_id')
 			except:
 				nodeUnauth(nodeList)
 				cluster_properties['isComplete'] = False
 				errors.append('An error occurred while attempting to add cluster node \"' + i['ricci_host'] + '\"')
 				return (False, {'errors': errors, 'requestResults':cluster_properties })
 
+                buildClusterCreateFlags(batch_id_map, clusterName)
+
 	messages.append('Creation of cluster \"' + clusterName + '\" has begun')
 	return (True, {'errors': errors, 'messages': messages })
 
+def buildClusterCreateFlags(self, batch_map, clusterName):
+  path = CLUSTER_FOLDER_PATH + clusterName
+  clusterfolder = self.restrictedTraverse(path)
+  for key in batch_map.keys():
+    id = batch_map[key]
+    batch_id = str(id)
+    clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(key)
+    #now designate this new object properly
+    objpath = path + "/" + key
+    flag = self.restrictedTraverse(objpath)
+    flag[BATCH_ID] = batch_id
+    flag[TASKTYPE] = CLUSTER_ADD
+    flag[FLAG_DESC] = "Creating node " + key + " for cluster " + clusterName
+  
+
 def validateAddClusterNode(self, request):
 	if 'clusterName' in request.form:
 		clusterName = request.form['clusterName']
@@ -888,7 +906,13 @@
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent + "___" + batch_id)
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent)
+  #Now we need to annotate the new DB object
+  objpath = path + "/" + ricci_agent
+  flag = self.restrictedTraverse(objpath)
+  flag[BATCH_ID] = batch_id
+  flag[TASKTYPE] = SERVICE_START
+  flag[FLAG_DESC] = "Starting service " + svcname
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
@@ -905,7 +929,13 @@
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent + "___" + batch_id)
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent)
+  #Now we need to annotate the new DB object
+  objpath = path + "/" + ricci_agent
+  flag = self.restrictedTraverse(objpath)
+  flag[BATCH_ID] = batch_id
+  flag[TASKTYPE] = SERVICE_RESTART
+  flag[FLAG_DESC] = "Restarting service " + svcname 
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
@@ -921,11 +951,14 @@
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
   batch_id = str(batch_number)
-  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent + "___" + batch_id)
-  #newobjpath = path + "/" + batch_id
-  #newobj = self.restrictedTraverse(newobjpath)
-  #newobj.manage_addProperty('title', ricci_agent, 'string')
-  #newobj.manage_addProperty('Description', "Please be Patient - Stopping Service", 'string')
+  clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(ricci_agent)
+  #Now we need to annotate the new DB object
+  objpath = path + "/" + ricci_agent
+  flag = self.restrictedTraverse(objpath)
+  flag[BATCH_ID] = batch_id
+  flag[TASKTYPE] = SERVICE_STOP
+  flag[FLAG_DESC] = "Stopping service " + svcname
+
   time.sleep(2)
 
   response = req.RESPONSE
@@ -1276,9 +1309,8 @@
   ##3) The ricci agent has no recollection of the task, so handle like 1 above
   for item in items:
     #Check here for more than 1 entry (an error)
-    datastrings = item[0].split('___')
-    rb = ricci_bridge(datastrings[0])
-    finished = rb.checkBatch(datastrings[1]) 
+    rb = ricci_bridge(item[0])
+    finished = rb.checkBatch(item[1][BATCH_ID]) 
     if finished == True:
       clusterfolder.manage_delObjects(item[0])
       return map
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/07/25 02:28:46	1.5
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/07/28 14:02:45	1.6
@@ -52,7 +52,10 @@
 
 PAGETYPE="pagetype"
 ACTIONTYPE="actiontype"
+TASKTYPE="tasktype"
 CLUNAME="clustername"
+BATCH_ID="batch_id"
+FLAG_DESC="flag_desc"
 
 PATH_TO_PRIVKEY="/var/lib/luci/var/certs/privkey.pem"
 PATH_TO_CACERT="/var/lib/luci/var/certs/cacert.pem"



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-28 11:46 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-28 11:46 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-28 11:46:35

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	Big fixes for resource support

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.17&r2=1.18
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.9&r2=1.10

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/27 16:34:11	1.17
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/28 11:46:35	1.18
@@ -5,6 +5,13 @@
 from ricci_bridge import *
 import time
 import Products.ManagedSystem
+from Ip import Ip
+from Clusterfs import Clusterfs
+from Fs import Fs
+from NFSClient import NFSClient
+from Netfs import Netfs
+from Script import Script
+from Samba import Samba
 
 #Policy for showing the cluster chooser menu:
 #1) If there are no clusters in the ManagedClusterSystems
@@ -1320,7 +1327,7 @@
                                                                                 
   return {}
                                                                                 
-def addResource(self, request):
+def addResource(self, request, ragent):
   if not request.form:
     return "Nothing submitted, no changes made."
                                                                                 
@@ -1336,20 +1343,32 @@
            'smb': addSmb}
                                                                                 
   type = request.form["type"]
-  return types[type](request)
+  res = types[type](request)
+  modelb = request.SESSION.get('model')
+  modelstr = ""
+  conf = modelb.exportModelAsString()
+  rb = ricci_bridge(ragent)
+  #try:
+  if True:
+    batch_id, result = rb.setClusterConf(str(conf))
+  #except:
+  else:
+    return "Some error occured in setClusterConf\n"
+
+  return "Resource Added"
 
 def getResourceForEdit(modelb, name):
-  resPtr = modelb.getResourcesPtr()
-  resources = resPrt.getChildren()
-                                                                                
+  resources = modelb.getResourcesPtr().getChildren()
+                                                                               
   for res in resources:
     if res.getName() == name:
       resources.removeChild(res)
       break
-                                                                                
+                                                                              
   return res
+
 def addIp(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1365,7 +1384,7 @@
   return res
 
 def addFs(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1396,7 +1415,7 @@
   return res
                                                                                 
 def addGfs(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1417,7 +1436,7 @@
   return res
 
 def addNfsm(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1439,7 +1458,7 @@
   return res
                                                                                 
 def addNfsc(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1453,7 +1472,7 @@
   return res
                                                                                 
 def addNfsx(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1465,7 +1484,7 @@
   return res
 
 def addScr(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1478,7 +1497,7 @@
   return res
                                                                                 
 def addSmb(request):
-  modelb = request.SESSION["model"]
+  modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
     res = getResourceForEdit(modelb, request.form['oldname'])
   else:
@@ -1493,5 +1512,8 @@
 def appendModel(request, model):
   try:
     request.SESSION.set('model', model)
-  except: pass
+  except:
+    pass
+  return
+
 
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/27 14:23:59	1.9
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/28 11:46:35	1.10
@@ -1,4 +1,3 @@
-
 from time import *
 import os
 from socket import *
@@ -10,526 +9,516 @@
 
 
 class ricci_bridge:
-    def __init__(self, hostname, port=11111):
-        self.__hostname = hostname
-        self.__port = port
-        return
+  def __init__(self, hostname, port=11111):
+    self.__hostname = hostname
+    self.__port = port
+    return
     
     
-    def process(self, xml_out):
-        #if os.path.exists("/opt/zopetooey/Extensions/certs/privkey.pem"):
-        #  return "<h1>Path Exists</h1>"
-        #else:
-        #  return "<h1>Path Does Not Exist</h1>"
-        CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="get_cluster.conf"/></request></module></batch></ricci>'
+  def process(self, xml_out):
+    CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="get_cluster.conf"/></request></module></batch></ricci>'
         
-        doc = self.makeConnection(CLUSTER_STR)
+    doc = self.makeConnection(CLUSTER_STR)
         
-        if doc == None:
-          print "Sorry, doc is None"
-        if doc != None:
-          bt_node = None
-          for node in doc.firstChild.childNodes:
+    if doc == None:
+      print "Sorry, doc is None"
+    if doc != None:
+      bt_node = None
+      for node in doc.firstChild.childNodes:
+        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+          if node.nodeName == 'batch':
+            bt_node = node
+      if bt_node == None:
+        print "bt_node == None"
+        doc = None
+      else:
+        #print doc.toxml()
+        mod_node = None
+        for node in bt_node.childNodes:
             if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'batch':
-                bt_node = node
-          if bt_node == None:
-            print "bt_node == None"
+                if node.nodeName == 'module':
+                    mod_node = node
+        if mod_node == None:
+            print "mod_node == None"
             doc = None
-          else:
-            #print doc.toxml()
-            mod_node = None
-            for node in bt_node.childNodes:
+        else:
+            resp_node = None
+            for node in mod_node.childNodes:
                 if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    if node.nodeName == 'module':
-                        mod_node = node
-            if mod_node == None:
-                print "mod_node == None"
+                    resp_node = node
+            if resp_node == None:
+                print "resp_node == None"
                 doc = None
             else:
-                resp_node = None
-                for node in mod_node.childNodes:
+                fr_node = None
+                for node in resp_node.childNodes:
                     if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                        resp_node = node
-                if resp_node == None:
-                    print "resp_node == None"
+                      fr_node = node
+                if fr_node == None:
+                    print "fr_node == None"
                     doc = None
                 else:
-                    fr_node = None
-                    for node in resp_node.childNodes:
-                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                          fr_node = node
-                    if fr_node == None:
-                        print "fr_node == None"
-                        doc = None
+                    varnode = None
+                    for node in fr_node.childNodes:
+                      if node.nodeName == 'var':
+                          varnode = node
+                          break
+                    if varnode == None:
+                       print "varnode == None"
+                       doc = None
                     else:
-                        varnode = None
-                        for node in fr_node.childNodes:
-                          if node.nodeName == 'var':
-                              varnode = node
+                      cl_node = None
+                      for node in varnode.childNodes:
+                          if node.nodeName == 'cluster':
+                              cl_node = node
                               break
-                        if varnode == None:
-                           print "varnode == None"
-                           doc = None
-                        else:
-                          cl_node = None
-                          for node in varnode.childNodes:
-                              if node.nodeName == 'cluster':
-                                  cl_node = node
-                                  break
-                          if cl_node == None:
-                            print "cl_node == None"
-                            doc = None
-                          else:
-                              docc = minidom.Document()
-                              docc.appendChild(cl_node)
-
-        return docc
-    
-    def __sendall(self, str, ssl_sock):
-        print str
-        s = str
-        while len(s) != 0:
-            pos = ssl_sock.write(s)
-            s = s[pos:]
-        return
-    
+                      if cl_node == None:
+                        print "cl_node == None"
+                        doc = None
+                      else:
+                          docc = minidom.Document()
+                          docc.appendChild(cl_node)
+
+    return docc
     
-    def __receive(self, ssl_sock):
-        doc = None
-        xml_in = ''
-        try:
-            while True:
-                buff = ssl_sock.read(1024)
-                if buff == '':
-                    break
-                xml_in += buff
-                try:
-                    doc = minidom.parseString(xml_in)
-                    break
-                except:
-                    pass
-        except:
-            pass
-        try:
-            #print 'try parse xml'
-            doc = minidom.parseString(xml_in)
-            #print 'xml is good'
-        except:
-            pass
-        #print "response--------------------------------"
-        #print doc.toxml()
-        #print "response--------------------------------"
-        return doc
+  def __sendall(self, str, ssl_sock):
+    print str
+    s = str
+    while len(s) != 0:
+        pos = ssl_sock.write(s)
+        s = s[pos:]
+    return
 
-    def getClusterStatus(self):
-        CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module></batch></ricci>'
-        # socket
-        sock = socket(AF_INET, SOCK_STREAM)
-        sock.connect((self.__hostname, self.__port))
-        ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
-
-        # receive ricci header
-        hello = self.__receive(ss)
-        if hello != None:
-            pass
-            #print hello.toxml()
-        self.__sendall(CLUSTER_STR, ss)
-        
-        
-        # receive response
-        doc = self.__receive(ss)
-        if doc == None:
-          print "Sorry, doc is None"
-        else:
-          payload = self.extractPayload(doc)
-        sock.shutdown(2)
-        sock.close()
-
-        return payload
-
-    def startService(self,servicename, preferrednode = None):
-        if preferrednode != None:
-          QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/><var mutable="false" name="nodename" type="string" value=\"' + preferrednode + '\" /></function_call></request></module></batch></ricci>'
-        else:
-          QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
+    
+  def __receive(self, ssl_sock):
+    doc = None
+    xml_in = ''
+    try:
+        while True:
+            buff = ssl_sock.read(1024)
+            if buff == '':
+                break
+            xml_in += buff
+            try:
+                doc = minidom.parseString(xml_in)
+                break
+            except:
+                pass
+    except:
+        pass
+    try:
+        #print 'try parse xml'
+        doc = minidom.parseString(xml_in)
+        #print 'xml is good'
+    except:
+        pass
+    return doc
+
+  def getClusterStatus(self):
+    CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module></batch></ricci>'
+    # socket
+    sock = socket(AF_INET, SOCK_STREAM)
+    sock.connect((self.__hostname, self.__port))
+    ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
+
+    # receive ricci header
+    hello = self.__receive(ss)
+    if hello != None:
+        pass
+        #print hello.toxml()
+    self.__sendall(CLUSTER_STR, ss)
+    
+    
+    # receive response
+    doc = self.__receive(ss)
+    if doc == None:
+      print "Sorry, doc is None"
+    else:
+      payload = self.extractPayload(doc)
+    sock.shutdown(2)
+    sock.close()
+
+    return payload
+
+  def startService(self,servicename, preferrednode = None):
+    if preferrednode != None:
+      QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/><var mutable="false" name="nodename" type="string" value=\"' + preferrednode + '\" /></function_call></request></module></batch></ricci>'
+    else:
+      QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
 
-        try:
-          payload = self.makeConnection(QUERY_STR)
-        except RicciReceiveError, r:
-          return None
-          
+    try:
+      payload = self.makeConnection(QUERY_STR)
+    except RicciReceiveError, r:
+      return None
+      
 
-        batch_number, result = self.batchAttemptResult(payload)
-        return (batch_number, result)
+    batch_number, result = self.batchAttemptResult(payload)
+    return (batch_number, result)
 
-    def restartService(self,servicename):
-        QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
+  def restartService(self,servicename):
+    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
 
-        try:
-          payload = self.makeConnection(QUERY_STR)
-        except RicciReceiveError, r:
-          return None
-          
+    try:
+      payload = self.makeConnection(QUERY_STR)
+    except RicciReceiveError, r:
+      return None
+      
 
-        batch_number, result = self.batchAttemptResult(payload)
-        return (batch_number, result)
+    batch_number, result = self.batchAttemptResult(payload)
+    return (batch_number, result)
 
 
-    def stopService(self,servicename):
-        QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
+  def stopService(self,servicename):
+    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module></batch></ricci>'
 
-        try:
-          payload = self.makeConnection(QUERY_STR)
-        except RicciReceiveError, r:
-          return None
-          
+    try:
+      payload = self.makeConnection(QUERY_STR)
+    except RicciReceiveError, r:
+      return None
+      
 
-        batch_number, result = self.batchAttemptResult(payload)
-        return (batch_number, result)
+    batch_number, result = self.batchAttemptResult(payload)
+    return (batch_number, result)
 
-    def getDaemonStates(self, dlist):
-      CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">'
-                                                                                
-      for item in dlist:
-        CLUSTER_STR = CLUSTER_STR + '<service name=\"' + item + '\"/>'
+  def getDaemonStates(self, dlist):
+    CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">'
                                                                                 
-      CLUSTER_STR = CLUSTER_STR + '</var></function_call></request></module></batch></ricci>'
+    for item in dlist:
+      CLUSTER_STR = CLUSTER_STR + '<service name=\"' + item + '\"/>'
                                                                                 
-      try:
-        payload = self.makeConnection(CLUSTER_STR)
-      except RicciReceiveError, r:
-        return None
+    CLUSTER_STR = CLUSTER_STR + '</var></function_call></request></module></batch></ricci>'
                                                                                 
-      result = self.extractDaemonInfo(payload)
+    try:
+      payload = self.makeConnection(CLUSTER_STR)
+    except RicciReceiveError, r:
+      return None
                                                                                 
-      return result
-                                                                                
-    def makeConnection(self,query_str):
-        # socket
-        sock = socket(AF_INET, SOCK_STREAM)
-        sock.connect((self.__hostname, self.__port))
-        ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
-
-        # receive ricci header
-        hello = self.__receive(ss)
-        if hello != None:
-            pass
-            #print hello.toxml()
-        self.__sendall(query_str, ss)
-        
-        
-        # receive response
-        payload = self.__receive(ss)
-        if payload == None:
-          raise RicciReceiveError('FATAL',"Unable to receive ricci data for %s" % self.__hostname)
-        sock.shutdown(2)
-        sock.close()
-
-        return payload
-
-
-    def extractPayload(self, doc):
-          docc = None
-          bt_node = None
-          for node in doc.firstChild.childNodes:
-            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'batch':
-                bt_node = node
-          if bt_node == None:
-            doc = None
-          else:
-            #print doc.toxml()
-            mod_node = None
-            for node in bt_node.childNodes:
-                if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    if node.nodeName == 'module':
-                        mod_node = node
-            if mod_node == None:
-                doc = None
-            else:
-                resp_node = None
-                for node in mod_node.childNodes:
-                    if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                        resp_node = node
-                if resp_node == None:
-                    doc = None
-                else:
-                    fr_node = None
-                    for node in resp_node.childNodes:
-                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                          fr_node = node
-                    if fr_node == None:
-                        doc = None
-                    else:
-                        varnode = None
-                        for node in fr_node.childNodes:
-                          if node.nodeName == 'var':
-                              varnode = node
-                              break
-                        if varnode == None:
-                           doc = None
-                        else:
-                          cl_node = None
-                          for node in varnode.childNodes:
-                              if node.nodeName == 'cluster':
-                                  cl_node = node
-                                  break
-                          if cl_node == None:
-                            doc = None
-                          else:
-                              docc = minidom.Document()
-                              docc.appendChild(cl_node)
-          return docc
-
-
-    def getBatchResult(self, doc):
-          docc = None
-          bt_node = None
-          for node in doc.firstChild.childNodes:
-            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'batch':
-                bt_node = node
-          if bt_node == None:
-            doc = None
+    result = self.extractDaemonInfo(payload)
+                                                                              
+    return result
+                                                                              
+  def makeConnection(self,query_str):
+    # socket
+    sock = socket(AF_INET, SOCK_STREAM)
+    sock.connect((self.__hostname, self.__port))
+    ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
+
+    # receive ricci header
+    hello = self.__receive(ss)
+    if hello != None:
+        pass
+        #print hello.toxml()
+    self.__sendall(query_str, ss)
+    
+    
+    # receive response
+    payload = self.__receive(ss)
+    if payload == None:
+      raise RicciReceiveError('FATAL',"Unable to receive ricci data for %s" % self.__hostname)
+    sock.shutdown(2)
+    sock.close()
+
+    return payload
+
+
+  def extractPayload(self, doc):
+    docc = None
+    bt_node = None
+    for node in doc.firstChild.childNodes:
+      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+        if node.nodeName == 'batch':
+          bt_node = node
+    if bt_node == None:
+      doc = None
+    else:
+      #print doc.toxml()
+      mod_node = None
+      for node in bt_node.childNodes:
+          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+              if node.nodeName == 'module':
+                  mod_node = node
+      if mod_node == None:
+          doc = None
+      else:
+          resp_node = None
+          for node in mod_node.childNodes:
+              if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+                  resp_node = node
+          if resp_node == None:
+              doc = None
           else:
-            #print doc.toxml()
-            mod_node = None
-            for node in bt_node.childNodes:
-                if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    if node.nodeName == 'module':
-                        mod_node = node
-            if mod_node == None:
-                doc = None
-            else:
-                resp_node = None
-                for node in mod_node.childNodes:
-                    if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                        resp_node = node
-                if resp_node == None:
-                    doc = None
-                else:
-                    fr_node = None
-                    for node in resp_node.childNodes:
-                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                          fr_node = node
-                    if fr_node == None:
-                        doc = None
+              fr_node = None
+              for node in resp_node.childNodes:
+                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+                    fr_node = node
+              if fr_node == None:
+                  doc = None
+              else:
+                  varnode = None
+                  for node in fr_node.childNodes:
+                    if node.nodeName == 'var':
+                        varnode = node
+                        break
+                  if varnode == None:
+                     doc = None
+                  else:
+                    cl_node = None
+                    for node in varnode.childNodes:
+                        if node.nodeName == 'cluster':
+                            cl_node = node
+                            break
+                    if cl_node == None:
+                      doc = None
                     else:
-                        varnode = None
-                        for node in fr_node.childNodes:
-                          if node.nodeName == 'var':
-                              varnode = node
-                              break
-                        if varnode == None:
-                           doc = None
-                        else:
-                          cl_node = None
-                          for node in varnode.childNodes:
-                              if node.nodeName == 'cluster':
-                                  cl_node = node
-                                  break
-                          if cl_node == None:
-                            doc = None
-                          else:
-                              docc = minidom.Document()
-                              docc.appendChild(cl_node)
-          return docc
-
-    def extractClusterConf(self, doc):
-          docc = None
-          bt_node = None
-          for node in doc.firstChild.childNodes:
-            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'batch':
-                bt_node = node
-          if bt_node == None:
-            print "bt_node == None"
-            doc = None
+                        docc = minidom.Document()
+                        docc.appendChild(cl_node)
+    return docc
+
+
+  def getBatchResult(self, doc):
+    docc = None
+    bt_node = None
+    for node in doc.firstChild.childNodes:
+      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+        if node.nodeName == 'batch':
+          bt_node = node
+    if bt_node == None:
+      doc = None
+    else:
+      #print doc.toxml()
+      mod_node = None
+      for node in bt_node.childNodes:
+          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+              if node.nodeName == 'module':
+                  mod_node = node
+      if mod_node == None:
+          doc = None
+      else:
+          resp_node = None
+          for node in mod_node.childNodes:
+              if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+                  resp_node = node
+          if resp_node == None:
+              doc = None
           else:
-            #print doc.toxml()
-            mod_node = None
-            for node in bt_node.childNodes:
-                if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    if node.nodeName == 'module':
-                        mod_node = node
-            if mod_node == None:
-                print "mod_node == None"
-                doc = None
-            else:
-                resp_node = None
-                for node in mod_node.childNodes:
-                    if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                        resp_node = node
-                if resp_node == None:
-                    print "resp_node == None"
-                    doc = None
-                else:
-                    fr_node = None
-                    for node in resp_node.childNodes:
-                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                          fr_node = node
-                    if fr_node == None:
-                        print "fr_node == None"
-                        doc = None
+              fr_node = None
+              for node in resp_node.childNodes:
+                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+                    fr_node = node
+              if fr_node == None:
+                  doc = None
+              else:
+                  varnode = None
+                  for node in fr_node.childNodes:
+                    if node.nodeName == 'var':
+                        varnode = node
+                        break
+                  if varnode == None:
+                     doc = None
+                  else:
+                    cl_node = None
+                    for node in varnode.childNodes:
+                        if node.nodeName == 'cluster':
+                            cl_node = node
+                            break
+                    if cl_node == None:
+                      doc = None
                     else:
-                        varnode = None
-                        for node in fr_node.childNodes:
-                          if node.nodeName == 'var':
-                              varnode = node
-                              break
-                        if varnode == None:
-                           print "varnode == None"
-                           doc = None
-                        else:
-                          cl_node = None
-                          for node in varnode.childNodes:
-                              if node.nodeName == 'cluster':
-                                  cl_node = node
-                                  break
-                          if cl_node == None:
-                            print "cl_node == None"
-                            doc = None
-                          else:
-                              docc = minidom.Document()
-                              docc.appendChild(cl_node)
-
-          return docc
-
-    def extractDaemonInfo(self, doc):
-          print "VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV"
-          print doc.toxml()
-          print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
-          resultlist = list()
-          docc = None
-          bt_node = None
-          for node in doc.firstChild.childNodes:
-            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'batch':
-                bt_node = node
-          if bt_node == None:
-            print "bt_node == None"
-            doc = None
+                      docc = minidom.Document()
+                      docc.appendChild(cl_node)
+    return docc
+
+  def extractClusterConf(self, doc):
+    docc = None
+    bt_node = None
+    for node in doc.firstChild.childNodes:
+      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+        if node.nodeName == 'batch':
+          bt_node = node
+    if bt_node == None:
+      print "bt_node == None"
+      doc = None
+    else:
+      #print doc.toxml()
+      mod_node = None
+      for node in bt_node.childNodes:
+          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+              if node.nodeName == 'module':
+                  mod_node = node
+      if mod_node == None:
+          print "mod_node == None"
+          doc = None
+      else:
+          resp_node = None
+          for node in mod_node.childNodes:
+              if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+                  resp_node = node
+          if resp_node == None:
+              print "resp_node == None"
+              doc = None
           else:
-            #print doc.toxml()
-            mod_node = None
-            for node in bt_node.childNodes:
-                if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                    if node.nodeName == 'module':
-                        mod_node = node
-            if mod_node == None:
-                print "mod_node == None"
-                doc = None
-            else:
-                resp_node = None
-                for node in mod_node.childNodes:
-                    if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                        resp_node = node
-                if resp_node == None:
-                    print "resp_node == None"
-                    doc = None
-                else:
-                    fr_node = None
-                    for node in resp_node.childNodes:
-                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                          fr_node = node
-                    if fr_node == None:
-                        print "fr_node == None"
-                        doc = None
+              fr_node = None
+              for node in resp_node.childNodes:
+                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+                    fr_node = node
+              if fr_node == None:
+                  print "fr_node == None"
+                  doc = None
+              else:
+                  varnode = None
+                  for node in fr_node.childNodes:
+                    if node.nodeName == 'var':
+                        varnode = node
+                        break
+                  if varnode == None:
+                     print "varnode == None"
+                     doc = None
+                  else:
+                    cl_node = None
+                    for node in varnode.childNodes:
+                        if node.nodeName == 'cluster':
+                            cl_node = node
+                            break
+                    if cl_node == None:
+                      print "cl_node == None"
+                      doc = None
                     else:
-                        varnode = None
-                        for node in fr_node.childNodes:
-                          if node.nodeName == 'var':
-                              varnode = node
-                              break
-                        if varnode == None:
-                           print "varnode == None"
-                           doc = None
-                        else:
-                          svc_node = None
-                          for node in varnode.childNodes:
-                              if node.nodeName == 'service':
-                                  svchash = {}
-                                  svchash['name'] = node.getAttribute('name')
-                                  svchash['enabled'] = node.getAttribute('enabled')
-                                  svchash['running'] = node.getAttribute('running')
-                                  resultlist.append(svchash)
-                                                                                
-          return resultlist
+                        docc = minidom.Document()
+                        docc.appendChild(cl_node)
+
+    return docc
 
-    def batchAttemptResult(self, doc):
-          docc = None
-          rc_node = None
-          for node in doc.firstChild.childNodes:
+  def extractDaemonInfo(self, doc):
+    resultlist = list()
+    docc = None
+    bt_node = None
+    for node in doc.firstChild.childNodes:
+      if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+        if node.nodeName == 'batch':
+          bt_node = node
+    if bt_node == None:
+      print "bt_node == None"
+      doc = None
+    else:
+      #print doc.toxml()
+      mod_node = None
+      for node in bt_node.childNodes:
+          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+              if node.nodeName == 'module':
+                  mod_node = node
+      if mod_node == None:
+          print "mod_node == None"
+          doc = None
+      else:
+          resp_node = None
+          for node in mod_node.childNodes:
               if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                  if node.nodeName == 'batch':
-                      #get batch number and status code
-                      batch_number = node.getAttribute('batch_id')
-                      result = node.getAttribute('status')
-                      return (batch_number, result)
+                  resp_node = node
+          if resp_node == None:
+              print "resp_node == None"
+              doc = None
+          else:
+              fr_node = None
+              for node in resp_node.childNodes:
+                  if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+                    fr_node = node
+              if fr_node == None:
+                  print "fr_node == None"
+                  doc = None
+              else:
+                  varnode = None
+                  for node in fr_node.childNodes:
+                    if node.nodeName == 'var':
+                        varnode = node
+                        break
+                  if varnode == None:
+                     print "varnode == None"
+                     doc = None
                   else:
-                      print "RETURNING NONE!!!"
-                      return (None, None )
-      
+                    svc_node = None
+                    for node in varnode.childNodes:
+                        if node.nodeName == 'service':
+                            svchash = {}
+                            svchash['name'] = node.getAttribute('name')
+                            svchash['enabled'] = node.getAttribute('enabled')
+                            svchash['running'] = node.getAttribute('running')
+                            resultlist.append(svchash)
+                                                                          
+    return resultlist
+
+  def batchAttemptResult(self, doc):
+    docc = None
+    rc_node = None
+    for node in doc.firstChild.childNodes:
+        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+            if node.nodeName == 'batch':
+                #get batch number and status code
+                batch_number = node.getAttribute('batch_id')
+                result = node.getAttribute('status')
+                return (batch_number, result)
+            else:
+                print "RETURNING NONE!!!"
+                return (None, None )
+
 
         
-    def getRicciResponse(self):
-      sock = socket(AF_INET, SOCK_STREAM)
-      try:
-        sock.connect((self.__hostname, self.__port))
-      except:
-        return False
-      ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
-      # receive ricci header
-      hello = self.__receive(ss)
-      if hello != None:
-        return True
-      else:
-        return False
+  def getRicciResponse(self):
+    sock = socket(AF_INET, SOCK_STREAM)
+    try:
+      sock.connect((self.__hostname, self.__port))
+    except:
+      return False
+    ss = ssl(sock, PATH_TO_PRIVKEY, PATH_TO_CACERT)
+    # receive ricci header
+    hello = self.__receive(ss)
+    if hello != None:
+      return True
+    else:
+      return False
 
-    def checkBatch(self, batch_id):
-      QUERY_STR = '<?xml version="1.0" ?><ricci version="1.0" function="batch_report" batch_id="' + batch_id + '"/>'
+  def checkBatch(self, batch_id):
+    QUERY_STR = '<?xml version="1.0" ?><ricci version="1.0" function="batch_report" batch_id="' + batch_id + '"/>'
 
-      try:
-        payload = self.makeConnection(QUERY_STR)
-      except RicciReceiveError, r:
-        return None
-          
-      #return true if finished or not present
-      success = payload.firstChild.getAttribute('success')
-      if success != "0":
-          return True  #I think this is ok...if id cannot be found
-      for node in payload.firstChild.childNodes:
-          if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-              if node.nodeName == 'batch':
-                  #get batch number and status code
-                  batch_number = node.getAttribute('batch_id')
-                  result = node.getAttribute('status')
-                  if result == "0":
-                      return True
-                  else:
-                      return False
-              else:
-                  return False
+    try:
+      payload = self.makeConnection(QUERY_STR)
+    except RicciReceiveError, r:
+      return None
+        
+    #return true if finished or not present
+    success = payload.firstChild.getAttribute('success')
+    if success != "0":
+        return True  #I think this is ok...if id cannot be found
+    for node in payload.firstChild.childNodes:
+        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+            if node.nodeName == 'batch':
+                #get batch number and status code
+                batch_number = node.getAttribute('batch_id')
+                result = node.getAttribute('status')
+                if result == "0":
+                    return True
+                else:
+                    return False
+            else:
+                return False
 
-      return False
+    return False
 
-def setClusterConf(clusterconf, propagate=True):
-  if propagate == True:
-    propg = "True"
-  else:
-    propg = "False"
-
-  QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"/><var propagate="' + propg + '" cluster.conf="' + clusterconf + '"/></request></module></batch></ricci>'
-
-  try:
-    payload = self.makeConnection(QUERY_STR)
-  except RicciReceiveError, r:
-    return None
-                                                                                
-                                                                                
-  batch_number, result = self.batchAttemptResult(payload)
-  return (batch_number, result)
+  def setClusterConf(self, clusterconf, propagate=True):
+    if propagate == True:
+      propg = "True"
+    else:
+      propg = "False"
+
+    QUERY_STR='<?xml version="1.0" ?><ricci async="true" function="process_batch" version="1.0"><batch><module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"/><var propagate="' + propg + '" cluster.conf="' + str(clusterconf) + '"/></request></module></batch></ricci>'
+
+    try:
+      payload = self.makeConnection(QUERY_STR)
+    except RicciReceiveError, r:
+      return None
+                                                                            
+                                                                            
+    batch_number, result = self.batchAttemptResult(payload)
+    return (batch_number, result)
 
 
 def createClusterBatch(cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
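
Editorial sketch: the headline fix in this revision moves setClusterConf
into the class so that its self.makeConnection() and
self.batchAttemptResult() calls resolve; the rest of the hunk re-indents
the extract* methods. Those methods all perform the same descent through
a ricci response, batch -> module -> response -> function_response ->
var -> cluster, as a ladder of nested ifs. A helper could do the walk in
one place; the names first_element_child and extract_cluster_node are
invented for illustration and are not part of the patch:

    import xml.dom
    from xml.dom import minidom

    def first_element_child(parent, name=None):
        # First child element (optionally matching a tag name), or None.
        # Assumes one relevant element per level, as the code above does.
        if parent is None:
            return None
        for node in parent.childNodes:
            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                if name is None or node.nodeName == name:
                    return node
        return None

    def extract_cluster_node(doc):
        # Same descent as extractPayload(); None entries match any element.
        node = doc.firstChild
        for tag in ('batch', 'module', None, None, 'var', 'cluster'):
            node = first_element_child(node, tag)
            if node is None:
                return None
        docc = minidom.Document()
        docc.appendChild(node)  # as the methods above do
        return docc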



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-27 16:34 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-27 16:34 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-27 16:34:11

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Methods for resource creation and editing

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.16&r2=1.17

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/27 15:53:00	1.16
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/27 16:34:11	1.17
@@ -1291,3 +1291,207 @@
       req['specialpagetype'] = "1"
       return map
   return map
+
+def getResourcesInfo(modelb, request):
+  resList = list()
+  baseurl = request['URL']
+  cluname = request['clustername']
+  for item in modelb.getResources():
+    itemmap = {}
+    itemmap['name'] = item.getName()
+    itemmap['type'] = item.resource_type
+    itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_CONFIG
+    itemmap['url'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE
+    resList.append(itemmap)
+  return resList
+                                                                                
+def getResourceInfo(modelb, request):
+  resMap = {}
+  name = request['resourcename']
+  baseurl = request['URL']
+  cluname = request['clustername']
+  for res in modelb.getResources():
+    if res.getName() == name:
+          resMap['name'] = res.getName()
+          resMap['type'] = res.resource_type
+          resMap['tag_name'] = res.TAG_NAME
+          resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
+          return resMap
+                                                                                
+  return {}
+                                                                                
+def addResource(self, request):
+  if not request.form:
+    return "Nothing submitted, no changes made."
+                                                                                
+  if not request.form["resourceName"]:
+    return "Please enter a name for the resource."
+  types = {'ip': addIp,
+           'fs': addFs,
+           'gfs': addGfs,
+           'nfsm': addNfsm,
+           'nfsx': addNfsx,
+           'nfsc': addNfsc,
+           'scr': addScr,
+           'smb': addSmb}
+                                                                                
+  type = request.form["type"]
+  return types[type](request)
+
+def getResourceForEdit(modelb, name):
+  resPtr = modelb.getResourcesPtr()
+  resources = resPtr.getChildren()
+                                                                                
+  for res in resources:
+    if res.getName() == name:
+      resources.removeChild(res)
+      break
+                                                                                
+  return res
+def addIp(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(Ip)
+  form = request.form
+  addr = form["ip1"]+"."+form["ip2"]+"."+form["ip3"]+"."+form["ip4"]
+  res.attr_hash["address"] = addr
+  if form.has_key('monitorLink'):
+    res.attr_hash["monitor_link"] = '1'
+  else:
+    res.attr_hash["monitor_link"] = '0'
+  modelb.getResourcesPtr().addChild(res);
+  return res
+
+def addFs(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(Fs)
+  form = request.form
+  res.attr_hash["name"] = form["resourceName"]
+  res.attr_hash["mountpoint"] = form["mountpoint"]
+  res.attr_hash["device"] = form["device"]
+  res.attr_hash["options"] = form["options"]
+  res.attr_hash["fstype"] = form["fstype"]
+  res.attr_hash["fsid"] = form["fsid"]
+  if form.has_key('forceunmount'):
+    res.attr_hash["force_unmount"] = '1'
+  else:
+    res.attr_hash["force_unmount"] = '0'
+                                                                                
+  if form.has_key('selffence'):
+    res.attr_hash["self_fence"] = '1'
+  else:
+    res.attr_hash["self_fence"] = '0'
+                                                                                
+  if form.has_key('checkfs'):
+    res.attr_hash["force_fsck"] = '1'
+  else:
+    res.attr_hash["force_fsck"] = '0'
+                                                                                
+  modelb.getResourcesPtr().addChild(res);
+  return res
+                                                                                
+def addGfs(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(Clusterfs)
+  form = request.form
+  res.attr_hash["name"] = form["resourceName"]
+  res.attr_hash["mountpoint"] = form["mountpoint"]
+  res.attr_hash["device"] = form["device"]
+  res.attr_hash["options"] = form["options"]
+  res.attr_hash["fsid"] = form["fsid"]
+                                                                                
+  if form.has_key('forceunmount'):
+    res.attr_hash["force_unmount"] = '1'
+  else:
+    res.attr_hash["force_unmount"] = '0'
+                                                                                
+  modelb.getResourcesPtr().addChild(res);
+  return res
+
+def addNfsm(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(Netfs)
+  form = request.form
+  res.attr_hash["name"] = form["resourceName"]
+  res.attr_hash["mountpoint"] = form["mountpoint"]
+  res.attr_hash["host"] = form["host"]
+  res.attr_hash["options"] = form["options"]
+  res.attr_hash["exportpath"] = form["export"]
+  res.attr_hash["nfstype"] = form["fstype"]
+                                                                                
+  if form.has_key('forceunmount'):
+    res.attr_hash["force_unmount"] = '1'
+  else:
+    res.attr_hash["force_unmount"] = '0'
+                                                                                
+  modelb.getResourcesPtr().addChild(res);
+  return res
+                                                                                
+def addNfsc(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(NFSClient)
+  form = request.form
+  res.attr_hash["name"] = form["resourceName"]
+  res.attr_hash["target"] = form["target"]
+  res.attr_hash["options"] = form["options"]
+                                                                                
+  modelb.getResourcesPtr().addChild(res);
+  return res
+                                                                                
+def addNfsx(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(NFSExport)
+  form = request.form
+  res.attr_hash["name"] = form["resourceName"]
+                                                                                
+  modelb.getResourcesPtr().addChild(res);
+  return res
+
+def addScr(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(Script)
+  form = request.form
+  res.attr_hash["name"] = form["resourceName"]
+  res.attr_hash["file"] = form["file"]
+                                                                                
+  modelb.getResourcesPtr().addChild(res);
+  return res
+                                                                                
+def addSmb(request):
+  modelb = request.SESSION["model"]
+  if request.form.has_key('edit'):
+    res = getResourceForEdit(modelb, request.form['oldname'])
+  else:
+    res = apply(Samba)
+  form = request.form
+  res.attr_hash["name"] = form["resourceName"]
+  res.attr_hash["workgroup"] = form["workgroup"]
+                                                                                
+  modelb.getResourcesPtr().addChild(res);
+  return res
+                                                                                
+def appendModel(request, model):
+  try:
+    request.SESSION.set('model', model)
+  except: pass
+
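
Editorial sketch: every add* handler above turns an HTML checkbox into
the strings '1' and '0' with the same four-line if/else. The pattern
could be captured once; form_flag is an invented name, not part of the
patch:

    def form_flag(form, key):
        # A checkbox is submitted only when checked; request.form acts
        # like a dict (Python 2 has_key idiom, matching the code above).
        if form.has_key(key):
            return '1'
        return '0'

    # e.g. inside addFs():
    #   res.attr_hash["force_unmount"] = form_flag(form, "forceunmount")
    #   res.attr_hash["self_fence"]    = form_flag(form, "selffence")
    #   res.attr_hash["force_fsck"]    = form_flag(form, "checkfs")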



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-27 15:53 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-07-27 15:53 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-27 15:53:00

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	typo

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.15&r2=1.16

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/25 20:16:21	1.15
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/27 15:53:00	1.16
@@ -146,7 +146,7 @@
 
 	return None
 
-formValidator = {
+formValidators = {
 	6: validateCreateCluster,
 	15: validateAddClusterNode
 }
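
Editorial note: the rename only bites at lookup time, since nothing
references the dict until a form is validated. A hypothetical call site
(the real one is outside this hunk) would fail with NameError under the
old name:

    pagetype = int(request.form['pagetype'])
    if formValidators.has_key(pagetype):
        ret = formValidators[pagetype](self, request)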



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-25 20:16 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-25 20:16 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-25 20:16:21

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fixed more quorum stuff in cluster info

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.14&r2=1.15

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/25 20:01:12	1.14
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/25 20:16:21	1.15
@@ -955,10 +955,13 @@
     return
 
   elif actiontype == FENCEDAEMON:
+    pass
 
   elif actiontype == MULTICAST:
+    pass
 
   elif actiontype == QUORUMD:
+    pass
 
   else:
     return
@@ -1019,6 +1022,10 @@
   map['min_score'] = ""
   map['device'] = ""
   map['label'] = ""
+
+  #list struct for heuristics...
+  hlist = list()
+
   if is_quorumd:
     qdp = model.getQuorumdPtr()
     interval = qdp.getAttribute('interval')
@@ -1047,7 +1054,6 @@
 
     heuristic_kids = qdp.getChildren()
     h_ctr = 0
-    hlist = list()
     for kid in heuristic_kids:
       hmap = {}
       hname = kid.getAttribute('name')
@@ -1073,7 +1079,7 @@
       else:
         hmap['hinterval'] = ""
       hlist.append(hmap)
-    map['hlist'] = hlist
+  map['hlist'] = hlist
 
   return map
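
Editorial note: the three pass statements are one half of the fix;
Python requires at least one statement in every suite, so an elif with
an empty body is a SyntaxError and the whole module fails to import.
The other half hoists hlist above the if, because the dedented
map['hlist'] = hlist would otherwise raise NameError whenever no quorum
daemon is configured. A minimal illustration of the pass idiom:

    def dispatch(actiontype):
        if actiontype == 'BASECLUSTER':
            return 'handled'
        elif actiontype == 'FENCEDAEMON':
            pass  # branch reserved; without this line the file fails to compile
        return None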
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-25 20:01 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-25 20:01 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-25 20:01:12

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fixed quorum stuff in cluster info

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.13&r2=1.14

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/25 00:56:56	1.13
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/25 20:01:12	1.14
@@ -923,7 +923,47 @@
 
   response = req.RESPONSE
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
+ 
+def processClusterProps(self, ricci_agent, request):
+  #First, retrieve cluster.conf from session
+  conf = request.SESSION.get('conf')
+  model = ModelBuilder(0, None, None, conf)
+
+  #Next, determine actiontype and switch on it
+  actiontype = request[ACTIONTYPE]
+
+  if actiontype == BASECLUSTER:
+    cp = model.getClusterPtr()
+    cfgver = cp.getConfigVersion()
+
+    rcfgver = request['cfgver']
+
+    if cfgver != rcfgver:
+      cint = int(cfgver)
+      rint = int(rcfgver)
+      if rint > cint:
+        cp.setConfigVersion(rcfgver)
+
+    rname = request['cluname']
+    name = model.getClusterAlias()
+
+    if rname != name:
+      cp.addAttribute('alias', rname)
+
+    response = request.RESPONSE
+    response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
+    return
+
+  elif actiontype == FENCEDAEMON:
+
+  elif actiontype == MULTICAST:
+
+  elif actiontype == QUORUMD:
+
+  else:
+    return
   
+ 
 def getClusterInfo(self, model, req):
   cluname = req[CLUNAME]
   baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
@@ -973,14 +1013,38 @@
   map['quorumd_url'] = quorumd_url
   is_quorumd = model.isQuorumd()
   map['is_quorumd'] = is_quorumd
+  map['interval'] = ""
+  map['tko'] = ""
+  map['votes'] = ""
+  map['min_score'] = ""
+  map['device'] = ""
+  map['label'] = ""
   if is_quorumd:
     qdp = model.getQuorumdPtr()
     interval = qdp.getAttribute('interval')
+    if interval != None:
+      map['interval'] = interval
+
     tko = qdp.getAttribute('tko')
+    if tko != None:
+      map['tko'] = tko
+
     votes = qdp.getAttribute('votes')
+    if votes != None:
+      map['votes'] = votes
+
     min_score = qdp.getAttribute('min_score')
+    if min_score != None:
+      map['min_score'] = min_score
+
     device = qdp.getAttribute('device')
+    if device != None:
+      map['device'] = device
+
     label = qdp.getAttribute('label')
+    if label != None:
+      map['label'] = label
+
     heuristic_kids = qdp.getChildren()
     h_ctr = 0
     hlist = list()
@@ -995,15 +1059,21 @@
       hinterval = kid.getAttribute('interval')
       if hprog == None:
         continue
-      hmap['hname'] = hname
+      if hname != None:
+        hmap['hname'] = hname
+      else:
+        hmap['hname'] = ""
       hmap['hprog'] = hprog
       if hscore != None:
         hmap['hscore'] = hscore
+      else:
+        hmap['hscore'] = ""
       if hinterval != None:
         hmap['hinterval'] = hinterval
+      else:
+        hmap['hinterval'] = ""
       hlist.append(hmap)
-    if len(hlist) > 0:
-      map['hlist'] = hlist
+    map['hlist'] = hlist
 
   return map
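
Editorial sketch: the bulk of this hunk defaults each quorumd attribute
to "" when getAttribute() comes back None, so the page template always
receives a string. The repeated shape could be a helper; attr_or_blank
is an invented name, not part of the patch:

    def attr_or_blank(node, name):
        # Fetch, then coerce None to the empty string, as done above.
        val = node.getAttribute(name)
        if val is None:
            return ""
        return val

    # map['interval']  = attr_or_blank(qdp, 'interval')
    # map['tko']       = attr_or_blank(qdp, 'tko')
    # map['min_score'] = attr_or_blank(qdp, 'min_score')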
 



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-25  0:56 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-25  0:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-25 00:56:56

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fixed null mcast addr error

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.12&r2=1.13

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/24 21:51:33	1.12
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/25 00:56:56	1.13
@@ -194,9 +194,9 @@
   cldata = {}
   cldata['Title'] = "Cluster List"
   cldata['cfg_type'] = "clusters"
-  cldata['absolute_url'] = url + "?pagetype=" + CLUSTERS
+  cldata['absolute_url'] = url + "?pagetype=" + CLUSTERLIST
   cldata['Description'] = "Clusters available for configuration"
-  if pagetype == CLUSTERS:
+  if pagetype == CLUSTERLIST:
     cldata['currentItem'] = True
   else:
     cldata['currentItem'] = False
@@ -214,16 +214,16 @@
   clcfg = {}
   clcfg['Title'] = "Configure"
   clcfg['cfg_type'] = "clustercfg"
-  clcfg['absolute_url'] = url + "?pagetype=" + CLUSTER_CONFIG
+  clcfg['absolute_url'] = url + "?pagetype=" + CLUSTERS
   clcfg['Description'] = "Configure a cluster"
-  if pagetype == CLUSTER_CONFIG:
+  if pagetype == CLUSTERS:
     clcfg['currentItem'] = True
   else:
     clcfg['currentItem'] = False
 
   #test...
   #clcfg['show_children'] = True
-  if pagetype == CLUSTER_CONFIG or pagetype == CLUSTER:
+  if pagetype == CLUSTERS:
     clcfg['show_children'] = True
   else:
     clcfg['show_children'] = False
@@ -237,7 +237,7 @@
     clsys['absolute_url'] = url + "?pagetype=" + CLUSTER + "&clustername=" + system[0]
     clsys['Description'] = "Configure this cluster"
 
-    if pagetype == CLUSTER:
+    if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
       if cname == system[0]:
         clsys['currentItem'] = True
       else:
@@ -688,7 +688,7 @@
 
   ctab = { 'Title':"cluster",
            'Description':"Cluster configuration page", 
-           'Taburl':"../cluster"}
+           'Taburl':"../cluster?pagetype=3"}
   if selectedtab == "cluster":
     ctab['isSelected'] = True
   else:
@@ -965,6 +965,7 @@
     map['is_mcast'] = "True"
   else:
     map['is_mcast'] = "False"
+    map['mcast_addr'] = "1.2.3.4"
     
   #-------------
   #quorum disk params
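
Editorial note: read with the earlier multicast hunk, the else branch
now both flags multicast as off and seeds mcast_addr so the form field
is never null; "1.2.3.4" is presumably a visible placeholder, since it
is not a multicast address (those live in 224.0.0.0/4). The combined
branch, restated:

    if is_mcast:
        map['is_mcast'] = "True"
        map['mcast_addr'] = model.getMcastAddr()
    else:
        map['is_mcast'] = "False"
        map['mcast_addr'] = "1.2.3.4"  # form placeholder only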



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-24 21:51 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-24 21:51 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-24 21:51:33

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	added exception handler in status check

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.11&r2=1.12

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/24 21:13:45	1.11
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/24 21:51:33	1.12
@@ -763,7 +763,10 @@
   results = list()
   vals = {}
   vals['type'] = "cluster"
-  vals['alias'] = doc.firstChild.getAttribute('alias')
+  try:
+    vals['alias'] = doc.firstChild.getAttribute('alias')
+  except AttributeError, e:
+    vals['alias'] = doc.firstChild.getAttribute('name')
   vals['votes'] = doc.firstChild.getAttribute('votes')
   vals['name'] = doc.firstChild.getAttribute('name')
   vals['minQuorum'] = doc.firstChild.getAttribute('minQuorum')
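
Editorial note: minidom's getAttribute() returns an empty string, not
an exception, for a missing attribute, so the AttributeError here most
plausibly means doc.firstChild is not an element at all (e.g. None on a
malformed status response). In that case the handler's own
getAttribute('name') call would raise as well; a more defensive sketch:

    node = doc.firstChild
    if node is not None and hasattr(node, 'getAttribute'):
        vals['alias'] = node.getAttribute('alias') or node.getAttribute('name')
    else:
        vals['alias'] = ''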



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-24 21:13 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-24 21:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-24 21:13:45

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	minor change in mcast section

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.10&r2=1.11

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/24 19:50:49	1.10
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/24 21:13:45	1.11
@@ -922,8 +922,8 @@
   response.redirect(req['HTTP_REFERER'] + "&busyfirst=true")
   
 def getClusterInfo(self, model, req):
-  baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
   cluname = req[CLUNAME]
+  baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
   map = {}
   basecluster_url = baseurl + ACTIONTYPE + "=" + BASECLUSTER
   #needed:
@@ -956,9 +956,12 @@
   map['multicast_url'] = multicast_url
   #mcast addr
   is_mcast = model.isMulticast()
-  map['is_mcast'] = is_mcast
+  #map['is_mcast'] = is_mcast
   if is_mcast:
     map['mcast_addr'] = model.getMcastAddr()
+    map['is_mcast'] = "True"
+  else:
+    map['is_mcast'] = "False"
     
   #-------------
   #quorum disk params
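
Editorial note: the first hunk is a use-before-assignment fix. The
right-hand side of an assignment is evaluated immediately, so building
baseurl from cluname before cluname = req[CLUNAME] raises NameError on
every request; the patch simply binds cluname first. The second hunk
stringifies is_mcast for the template. A minimal sketch of the ordering
rule:

    def build_base_url(req):
        cluname = req['clustername']  # must be bound before use
        return '%s?clustername=%s&' % (req['URL'], cluname)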



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-24 19:50 jparsons
  0 siblings, 0 replies; 185+ messages in thread
From: jparsons @ 2006-07-24 19:50 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	jparsons at sourceware.org	2006-07-24 19:50:49

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	syntax mistake

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.9&r2=1.10

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/24 19:39:06	1.9
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/24 19:50:49	1.10
@@ -982,7 +982,7 @@
       hname = kid.getAttribute('name')
       if hname == None:
         hname = h_ctr
-        h_ctr++
+        h_ctr = h_ctr + 1
       hprog = kid.getAttribute('program')
       hscore = kid.getAttribute('score')
       hinterval = kid.getAttribute('interval')
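
Editorial note: Python has no '++' operator, so h_ctr++ is a
SyntaxError rather than a silent no-op; the patch writes the increment
out. The augmented form is equivalent:

    h_ctr = 0
    h_ctr = h_ctr + 1  # the patched spelling
    h_ctr += 1         # equivalent augmented assignment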



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-19 22:28 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-07-19 22:28 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-19 22:28:17

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py ricci_bridge.py 

Log message:
	remaining bits for cluster creation

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.5&r2=1.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.15&r2=1.16
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.6&r2=1.7

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/19 21:38:36	1.5
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/19 22:28:17	1.6
@@ -112,6 +112,8 @@
 		return (False, {'errors': errors, 'requestResults':cluster_properties })
 
 	if cluster_properties['isComplete'] == True:
+		from ricci_communicator import RicciCommunicator
+
 		batchNode = createClusterBatch(clusterName, clusterName, map(lambda x: x['ricci_host'], nodeList), True, False, False)
 		if not batchNode:
 			nodeUnauth(nodeList)
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/07/19 21:38:36	1.15
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/07/19 22:28:17	1.16
@@ -174,14 +174,14 @@
 		if systemName[:9] == 'localhost' or systemName[:5] == '127.0':
 			systemName = host
 		node = { 'host': host, 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': True }
+
 		cluster_info = rc.cluster_info()
-		if not cluster or not cluster_info or cluster_info[0] != cluster:
-			if cluster:
-				node['errors'] = 'Node \"' + host + '\" is reporting it is not a member of cluster \"' + cluster + '\"'
-				if cluster_info[0]:
-					node['errors'] += ' and that it is a member of cluster \"' + cluster_info[0] + '\"'
-			else:
-				node['errors'] = 'Node \"' + host + '\" reports it is a member of cluster \"' + cluster_info[0] + '\"'
+		if cluster and ((not cluster_info) or (cluster_info[0] != cluster)):
+			node['errors'] = 'Node \"' + host + '\" is reporting it is not a member of cluster \"' + cluster + '\"'
+			if cluster_info and cluster_info[0]:
+				node['errors'] += ' and that it is a member of cluster \"' + cluster_info[0] + '\"'
+		if not cluster and cluster_info and cluster_info[0]:
+			node['errors'] = 'Node \"' + host + '\" reports it is a member of cluster \"' + cluster_info[0] + '\"'
 		return node
 
 	error = 'Unable to authenticate to the ricci agent on \"' + host + '\"'
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/19 21:38:36	1.6
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/19 22:28:17	1.7
@@ -572,4 +572,4 @@
     batch += '</module>'
     batch += '</batch>'
 
-	return minidom.parseString(batch).firstChild
+    return minidom.parseString(batch).firstChild
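
Editorial note: two of these hunks reward a closer look. The
ricci_bridge change swaps a tab-indented return for the file's
four-space indent; against four-space neighbours a tab expands to eight
columns, so the previous revision likely died with an IndentationError
before the return could run. The homebase_adapters change untangles the
membership check so each complaint fires independently; restated as a
standalone sketch (names taken from the hunk above):

    def membership_error(cluster, cluster_info, host):
        # cluster: name luci expects; cluster_info[0]: name the node reports
        if cluster and (not cluster_info or cluster_info[0] != cluster):
            err = 'Node "%s" is reporting it is not a member of cluster "%s"' % (host, cluster)
            if cluster_info and cluster_info[0]:
                err += ' and that it is a member of cluster "%s"' % cluster_info[0]
            return err
        if not cluster and cluster_info and cluster_info[0]:
            return 'Node "%s" reports it is a member of cluster "%s"' % (host, cluster_info[0])
        return None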



^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-19 21:38 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-07-19 21:38 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-19 21:38:36

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py ricci_bridge.py 

Log message:
	flesh out cluster creation

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.4&r2=1.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.14&r2=1.15
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.5&r2=1.6

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/19 20:57:39	1.4
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/19 21:38:36	1.5
@@ -2,7 +2,7 @@
 from ZPublisher import HTTPRequest
 import AccessControl
 from conga_constants import *
-from ricci_bridge import ricci_bridge
+from ricci_bridge import *
 import time
 import Products.ManagedSystem
 
@@ -23,6 +23,7 @@
 		return
 
 	errors = list()
+	messages = list()
 	nodeList = list()
 	nodeHash = {}
 	rnodeHash = {}
@@ -107,13 +108,29 @@
 	}
 
 	if cluster_properties['isComplete'] != True:
+		nodeUnauth(nodeList)
 		return (False, {'errors': errors, 'requestResults':cluster_properties })
 
 	if cluster_properties['isComplete'] == True:
+		batchNode = createClusterBatch(clusterName, clusterName, map(lambda x: x['ricci_host'], nodeList), True, False, False)
+		if not batchNode:
+			nodeUnauth(nodeList)
+			cluster_properties['isComplete'] = False
+			errors.append('Unable to generate cluster creation ricci command')
+			return (False, {'errors': errors, 'requestResults':cluster_properties })
+
 		for i in nodeList:
-			i = i # yell at ricci
+			try:
+				rc = RicciCommunicator(i['ricci_host'])
+				resultNode = rc.process_batch(batchNode, async=True)
+			except:
+				nodeUnauth(nodeList)
+				cluster_properties['isComplete'] = False
+				errors.append('An error occurred while attempting to add cluster node \"' + i['ricci_host'] + '\"')
+				return (False, {'errors': errors, 'requestResults':cluster_properties })
 
-	return (len(errors) < 1, {'errors': errors, 'messages': messages })
+	messages.append('Creation of cluster \"' + clusterName + '\" has begun')
+	return (True, {'errors': errors, 'messages': messages })
 
 def createCluChooser(self, request, systems):
   dummynode = {}
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/07/19 20:19:53	1.14
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/07/19 21:38:36	1.15
@@ -137,10 +137,11 @@
 def nodeUnauth(nodeList):
 	for i in nodeList:
 		try:
-			if i['prev_auth'] == True:
+			if i['prev_auth'] != True:
 				host = i['host']
 				rc = RicciCommunicator(host)
 				rc.unauth()
+				i['cur_auth'] = False
 		except:
 			pass
 
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/19 21:02:32	1.5
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/19 21:38:36	1.6
@@ -571,3 +571,5 @@
     batch += '</request>'
     batch += '</module>'
     batch += '</batch>'
+
+	return minidom.parseString(batch).firstChild

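Two details of this commit are worth flagging: the nodeUnauth fix inverts the prev_auth test, so cleanup now revokes only the authentications this request itself established, and the new loop in cluster_adapters rev 1.5 fans one generated batch out to every node's ricci agent, unwinding all authentications if any dispatch fails. The fan-out pattern, condensed (RicciCommunicator, process_batch and nodeUnauth are taken from the diff; the wrapper and its signature are assumptions, and async=True is the Python 2-era keyword this codebase uses -- not valid syntax on Python 3.7 and later):

def dispatch_batch(batch_node, node_list):
	# Condensed sketch of the dispatch loop in cluster_adapters.py rev 1.5.
	for node in node_list:
		try:
			rc = RicciCommunicator(node['ricci_host'])
			rc.process_batch(batch_node, async=True)  # queue and move on
		except:
			# One failure aborts creation: revoke every auth we set up.
			nodeUnauth(node_list)
			return (False, 'error contacting node "%s"' % node['ricci_host'])
	return (True, None)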


^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-19 20:57 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-07-19 20:57 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-19 20:57:39

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py ricci_bridge.py 

Log message:
	add in stan's code to generate the batch command for creating a cluster

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.3&r2=1.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.3&r2=1.4

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/19 20:19:53	1.3
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/19 20:57:39	1.4
@@ -105,7 +105,15 @@
 		'rnodeHash': rnodeHash,
 		'isComplete': len(errors) < 1 and len(filter(dfn, nodeList)) == 0
 	}
-	return (len(errors) < 1, {'errors': errors, 'requestResults': cluster_properties })
+
+	if cluster_properties['isComplete'] != True:
+		return (False, {'errors': errors, 'requestResults':cluster_properties })
+
+	if cluster_properties['isComplete'] == True:
+		for i in nodeList:
+			i = i # yell at ricci
+
+	return (len(errors) < 1, {'errors': errors, 'messages': messages })
 
 def createCluChooser(self, request, systems):
   dummynode = {}
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/17 21:58:48	1.3
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/07/19 20:57:39	1.4
@@ -513,3 +513,61 @@
                   return False
 
       return False
+
+def createClusterBatch(cluster_name, nodeList, services, shared_storage, LVS):
+    batch = '<?xml version="1.0" ?>'
+    batch += '<batch>'
+    batch += '<module name="rpm">'
+    batch += '<request API_version="1.0">'
+    batch += '<function_call name="install">'
+    batch += '<var name="sets" type="list_xml">'
+    batch += '<set name="Cluster Base"/>'
+
+    if services:
+        batch += '<set name="Cluster Service Manager"/>'
+
+    if shared_storage:
+        batch += '<set name="Clustered Storage"/>'
+
+    if LVS:
+        batch += '<set name="Linux Virtual Server"/>'
+
+    batch += '</var>'
+    batch += '</function_call>'
+    batch += '</request>'
+    batch += '</module>'
+    
+    batch += '<module name="reboot">'
+    batch += '<request API_version="1.0">'
+    batch += '<function_call name="reboot_now"/>'
+    batch += '</request>'
+    batch += '</module>'
+    
+    batch += '<module name="cluster">'
+    batch += '<request API_version="1.0">'
+    batch += '<function_call name="set_cluster.conf">'
+    batch += '<var mutable="false" name="propagate" type="boolean" value="false"/>'
+    batch += '<var mutable="false" name="cluster.conf" type="xml">'
+    batch += '<cluster config_version="1" name="' + cluster_name + '">'
+    batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
+
+    batch += '<clusternodes>'
+    for i in nodeList:
+        batch += '<clusternode name="' + i + '" votes="1" />'
+    batch += '</clusternodes>'
+
+    batch += '<cman/>'
+    batch += '<fencedevices/>'
+    batch += '<rm/>'
+    batch += '</cluster>'
+    batch += '</var>'
+    batch += '</function_call>'
+    batch += '</request>'
+    batch += '</module>'
+    
+    batch += '<module name="cluster">'
+    batch += '<request API_version="1.0">'
+    batch += '<function_call name="start_node" cluster_startup="true"/>'
+    batch += '</request>'
+    batch += '</module>'
+    batch += '</batch>'


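Two caveats about createClusterBatch as committed in ricci_bridge rev 1.4: the function assembles the batch string but falls off the end without returning it (a return is appended in rev 1.6, tab-indented against the 4-space body, and the indentation is fixed in rev 1.7), and the call site added in cluster_adapters rev 1.5 passes six arguments against this five-parameter signature, so the signature was presumably widened in a later revision. The batch chains four ricci modules -- rpm install, reboot, set_cluster.conf, start_node -- and embeds a skeleton cluster.conf. Reconstructing the concatenations above for a hypothetical call with cluster name 'alpha' and nodes 'n1' and 'n2':

# cluster.conf payload produced for createClusterBatch('alpha',
# ['n1', 'n2'], False, False, False); line breaks added here for
# readability only.
expected_cluster_conf = (
    '<cluster config_version="1" name="alpha">'
    '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
    '<clusternodes>'
    '<clusternode name="n1" votes="1" />'
    '<clusternode name="n2" votes="1" />'
    '</clusternodes>'
    '<cman/>'
    '<fencedevices/>'
    '<rm/>'
    '</cluster>'
)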

^ permalink raw reply	[flat|nested] 185+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
@ 2006-07-19 20:19 rmccabe
  0 siblings, 0 replies; 185+ messages in thread
From: rmccabe @ 2006-07-19 20:19 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-07-19 20:19:53

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py 

Log message:
	cluster create, and cluster remove (stop managing) bits

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.13&r2=1.14

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/14 16:00:26	1.2
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/07/19 20:19:53	1.3
@@ -14,12 +14,111 @@
 #then only display chooser if the current user has 
 #permissions on at least one. If the user is admin, show ALL clusters
 
-                          
+from homebase_adapters import nodeAuth, nodeUnauth                     
 
 CLUSTER_FOLDER_PATH = '/luci/systems/cluster/'
 
+def validatePost(request):
+	if int(request.form['pagetype']) != 6:
+		return
+
+	errors = list()
+	nodeList = list()
+	nodeHash = {}
+	rnodeHash = {}
+	oldNodeHash = {}
+	oldRnodeHash = {}
+	requestResults = {}
+
+ 	sessionData = request.SESSION.get('checkRet')
+	if sessionData and 'requestResults' in sessionData:
+		requestResults = sessionData['requestResults']
+		if 'nodeHash' in requestResults:
+			oldNodeHash = requestResults['nodeHash']
+		if 'rnodeHash' in requestResults:
+			oldRnodeHash = requestResults['rnodeHash']
+
+	if not 'clusterName' in request.form or not request.form['clusterName']:
+		return (False, {'errors': [ 'No cluster name was specified.' ] })
+	clusterName = request.form['clusterName']
+
+	try:
+		numStorage = int(request.form['numStorage'])
+	except:
+		return (False, { 'errors': ['Unknown number of systems entered'], 'requestResults': requestResults })
+
+	if numStorage < 2:
+		return (False, { 'errors': ['A cluster must contain at least two nodes'], 'requestResults': requestResults })
+
+	i = 0
+	while i < numStorage:
+		try:
+			sysData = request.form['__SYSTEM' + str(i)]
+			if not sysData or sysData[0] == '':
+				raise
+
+			if len(sysData) < 2 or sysData[1] == '':
+				errors.append('No password was specified for host \"' + sysData[0] + '\"')
+				raise
+		except:
+			i += 1
+			continue
+
+		if len(sysData) > 1:
+			node = nodeAuth(None, sysData[0], sysData[1])
+
+			if oldRnodeHash and node['ricci_host'] in oldRnodeHash:
+				oldNode = oldRnodeHash[node['ricci_host']]
+			elif oldNodeHash and node['host'] in nodeHash:
+				oldNode = oldNodeHash[node['host']]
+			else:
+				oldNode = None
+
+			if 'errors' in node:
+				errors.append(node['errors'])
+				node['errors'] = True
+
+			if node['host'] in nodeHash or node['ricci_host'] in rnodeHash:
+				node['errors'] = True
+				errors.append('You added the node \"' + node['host'] + '\" more than once')
+			else:
+				if oldNode and 'prev_auth' in oldNode:
+					node['prev_auth'] = oldNode['prev_auth']
+
+				nodeHash[node['host']] = node
+				rnodeHash[node['ricci_host']] = node
+				nodeList.append(node)
+		i += 1
+
+	if len(nodeList) < 2:
+		errors.append('A cluster must contain at least two nodes')
+
+	sfn = lambda x, y: \
+		x['cur_auth'] - y['cur_auth'] or (('errors' in y) - ('errors' in x))
+	nodeList.sort(sfn)
+
+	dfn = lambda x: not 'cur_auth' in x or x['cur_auth'] != True
+	cluster_properties = {
+		'clusterName': clusterName,
+		'nodeList': nodeList,
+		'nodeHash': nodeHash,
+		'rnodeHash': rnodeHash,
+		'isComplete': len(errors) < 1 and len(filter(dfn, nodeList)) == 0
+	}
+	return (len(errors) < 1, {'errors': errors, 'requestResults': cluster_properties })
+
 def createCluChooser(self, request, systems):
   dummynode = {}
+  
+  if request.REQUEST_METHOD == 'POST':
+    ret = validatePost(request)
+    try:
+		request.SESSION.set('checkRet', ret[1])
+    except:
+		request.SESSION.set('checkRet', {})
+  else:
+    try: request.SESSION.set('checkRet', {})
+    except: pass
 
   #First, see if a cluster is chosen, then 
   #check that the current user can access that system
@@ -39,6 +138,7 @@
   except KeyError, e:
     pagetype = "0"
 
+
   cldata = {}
   cldata['Title'] = "Cluster List"
   cldata['cfg_type'] = "clusters"
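
The cmp-style sort added to validatePost is easy to misread: it orders nodes with cur_auth False ahead of authenticated ones and, within each group, nodes carrying an 'errors' key ahead of clean ones, so problem entries surface first. The same ordering as a key function (a readability sketch, not repository code):

def node_sort_key(node):
	# False sorts before True, so unauthenticated nodes come first;
	# within a group, ('errors' not in node) is False for errored
	# nodes, putting them ahead of clean ones -- the same order as
	# the sfn comparator above.
	return (node['cur_auth'], 'errors' not in node)

# nodeList.sort(key=node_sort_key) orders the same as nodeList.sort(sfn)
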
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/07/18 19:26:03	1.13
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/07/19 20:19:53	1.14
@@ -174,8 +174,13 @@
 			systemName = host
 		node = { 'host': host, 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': True }
 		cluster_info = rc.cluster_info()
-		if not cluster_info or cluster_info[0] != cluster:
-			node['errors'] = 'Node ' + host + ' is reporting it is not a member of cluster \"' + cluster + '\"'
+		if not cluster or not cluster_info or cluster_info[0] != cluster:
+			if cluster:
+				node['errors'] = 'Node \"' + host + '\" is reporting it is not a member of cluster \"' + cluster + '\"'
+				if cluster_info[0]:
+					node['errors'] += ' and that it is a member of cluster \"' + cluster_info[0] + '\"'
+			else:
+				node['errors'] = 'Node \"' + host + '\" reports it is a member of cluster \"' + cluster_info[0] + '\"'
 		return node
 
 	error = 'Unable to authenticate to the ricci agent on \"' + host + '\"'
@@ -563,21 +568,20 @@
 	except:
 		sessionData = None
 
-
 	if 'ACTUAL_URL' in request:
 		url = request['ACTUAL_URL']
 	else:
 		url = '.'
 
 	if 'pagetype' in request.form:
-		pagetype = request.form['pagetype']
+		pagetype = int(request.form['pagetype'])
 	else:
 		try: request.SESSION.set('checkRet', {})
 		except: pass
 		return homebasePortal(self, request, '.', '0')
 
 	try:
-		validatorFn = formValidators[int(request.form['pagetype']) - 1]
+		validatorFn = formValidators[pagetype - 1]
 	except:
 		try: request.SESSION.set('checkRet', {})
 		except: pass
@@ -887,6 +891,12 @@
 	except:
 		return 'Unable to set permissions on new system \"' + host + '\"'
 
+def abortManageCluster(self):
+	try:
+		sessionData = request.SESSION.get('checkRet')
+		nodeUnauth(sessionData['requestResults']['nodeList'])
+	except: pass
+
 def manageCluster(self, clusterName, nodeList):
 	clusterName = str(clusterName)
 
@@ -920,11 +930,6 @@
 		except: pass
 		return 'Unable to set permissions on new cluster \"' + clusterName + '\"-- Cluster creation failed'
 
-	try:
-		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
-	except:
-		ssystem = None
-
 	for i in nodeList:
 		if 'ricci_host' in i:
 			host = str(i['ricci_host'])
@@ -938,25 +943,39 @@
 				raise 'not there'
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
-		except Exception, e:
+		except:
 			nodeUnauth(nodeList)
 			try: clusters.manage_delObjects([clusterName])
 			except: pass
 			return 'Unable to create cluster node \"' + host + '\" for cluster \"' + clusterName + '\" -- Cluster creation failed."'
 
-		if ssystem:
-			try:
-				# It's already there, as a storage system, no problem.
-				exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
-				continue
-			except: pass
+	try:
+		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+		if not ssystem:
+			raise
+	except:
+		return
 
-			try:
-				ssystem.manage_addFolder(host, '__luci__:system')
-				newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
-				newSystem.manage_acquiredPermissions([])
-				newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
-			except: pass
+	# Only add storage systems if the cluster and cluster node DB
+	# objects were added successfully.
+	for i in nodeList:
+		if 'ricci_host' in i:
+			host = str(i['ricci_host'])
+		else:
+			host = str(i['host'])
+
+		try:
+			# It's already there, as a storage system, no problem.
+			exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			continue
+		except: pass
+
+		try:
+			ssystem.manage_addFolder(host, '__luci__:system')
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except: pass
 
 def delSystem(self, systemName):
 	try:
@@ -964,28 +983,25 @@
 	except:
 		return 'Unable to find storage system \"' + systemName + '\"'
 
-	rc = RicciCommunicator(systemName)
-	cluster_info = rc.cluster_info()
-
 	try:
-		rc.unauth()
-	except Exception, e:
-		e = str(e)
-		if str(e) != '5':
-			# If it's simply a case where we're not authed in the first
-			# place, an attempt to unauthorize failing isn't a problem.
-			return 'Unable to unauthenticate to storage system \"' + systemName + '\"'
-		pass
+		rc = RicciCommunicator(systemName)
+		if not rc:
+			raise
+	except:
+		return 'Unable to connect to the ricci agent on \"' + systemName + '\" to unauthenticate'
 
-	if len(cluster_info) > 0 and cluster_info[0] != '':
+	# Only unauthenticate if the system isn't a member of
+	# a managed cluster.
+	cluster_info = rc.cluster_info()
+	if not cluster_info[0]:
+		try: rc.unauth()
+		except: pass
+	else:
 		try:
-			delClusterSystem(self, str(cluster_info[0]), systemName)
-		except KeyError:
-			# The cluster may have been deleted, but the system
-			# may still exist in the storage dir.
-			pass
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + cluster_info[0] + '/' + rc.system_name())
 		except:
-			return 'Unable to delete cluster storage system \"' + systemName + '\"'
+			try: rc.unauth()
+			except: pass
 
 	try:
 		ssystem.manage_delObjects([systemName])
@@ -1007,8 +1023,7 @@
 	except:
 		return 'Unable to delete cluster \"' + clusterName + '\"'
 
-def delClusterSystem(self, clusterName, systemName):
-	cluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+def delClusterSystem(self, cluster, systemName):
 	try:
 		if not self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + systemName):
 			raise
@@ -1029,10 +1044,9 @@
 	errors = ''
 	for i in csystems:
 		try:
-			cluster.manage_delObjects([i])
-			delClusterSystem(i)
+			delClusterSystem(self, cluster, i[0])
 		except:
-			errors += 'Unable to delete the cluster system \"' + i + '\"\n'
+			errors += 'Unable to delete the cluster system \"' + i[0] + '\"\n'
 
 	return errors
 

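The delSystem rework narrows when a system gets unauthenticated: only when ricci reports no cluster membership at all, or when the node claims membership in a cluster that luci's database no longer tracks. Its decision logic reduced to a predicate (a sketch; the Zope traversal that determines node_in_db is elided, and both names here are hypothetical):

def should_unauth(cluster_info, node_in_db):
	# Revoke ricci authentication only when the system is not, or is
	# no longer, a tracked cluster node.
	if not cluster_info or not cluster_info[0]:
		return True  # the node reports no cluster membership
	# The node claims membership: unauth only if luci lost track of it.
	return not node_in_db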


^ permalink raw reply	[flat|nested] 185+ messages in thread

end of thread, other threads:[~2008-07-17 16:36 UTC | newest]

Thread overview: 185+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-02-13 19:50 [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte rmccabe
  -- strict thread matches above, loose matches on Subject: below --
2008-07-17 16:36 rmccabe
2008-04-18 20:37 rmccabe
2007-12-12 15:45 rmccabe
2007-08-23 19:00 rmccabe
2007-08-22 20:57 rmccabe
2007-05-03 19:51 rmccabe
2007-04-02 16:35 rmccabe
2007-04-02 15:56 rmccabe
2007-03-27  2:03 rmccabe
2007-03-16  3:19 rmccabe
2007-03-16  3:19 rmccabe
2007-03-16  3:19 rmccabe
2007-03-13  3:07 rmccabe
2007-03-13  3:06 rmccabe
2007-03-12  5:47 rmccabe
2007-03-12  5:46 rmccabe
2007-03-12  5:46 rmccabe
2007-03-06 22:48 rmccabe
2007-03-01 20:22 rmccabe
2007-03-01 20:22 rmccabe
2007-03-01 20:22 rmccabe
2007-02-13 19:50 rmccabe
2007-02-12 20:25 rmccabe
2007-02-12 20:24 rmccabe
2007-02-07 22:00 rmccabe
2007-02-07 21:30 rmccabe
2007-02-05 19:56 rmccabe
2007-01-31 23:45 rmccabe
2007-01-31 19:28 rmccabe
2007-01-31 18:50 rmccabe
2007-01-30 21:41 jparsons
2007-01-30 21:21 jparsons
2007-01-30 21:05 jparsons
2007-01-29 23:30 rmccabe
2007-01-26 19:35 rmccabe
2007-01-18  2:48 rmccabe
2007-01-17 22:26 rmccabe
2007-01-17 22:14 rmccabe
2007-01-10 23:33 jparsons
2007-01-10 22:45 rmccabe
2007-01-10 20:06 rmccabe
2006-12-20 20:40 jparsons
2006-12-14 21:37 rmccabe
2006-12-14 17:03 rmccabe
2006-12-08 23:02 rmccabe
2006-11-30 20:12 jparsons
2006-11-27 21:06 rmccabe
2006-11-27 21:05 rmccabe
2006-11-27 18:15 rmccabe
2006-11-20 23:32 rmccabe
2006-11-20 15:05 jparsons
2006-11-17  5:50 rmccabe
2006-11-17  5:48 rmccabe
2006-11-10 18:18 rmccabe
2006-11-10 17:59 rmccabe
2006-11-09 22:30 rmccabe
2006-11-09 14:17 rmccabe
2006-11-08 21:42 rmccabe
2006-11-08 15:52 jparsons
2006-11-07 20:14 jparsons
2006-11-07 20:13 jparsons
2006-11-07  2:36 jparsons
2006-11-07  1:32 jparsons
2006-11-06 23:55 rmccabe
2006-11-05  0:59 rmccabe
2006-11-03 21:13 jparsons
2006-11-03  1:24 rmccabe
2006-11-03  1:08 rmccabe
2006-11-02 20:58 rmccabe
2006-11-02 20:45 rmccabe
2006-11-02 20:41 rmccabe
2006-11-02  3:17 rmccabe
2006-10-31 17:18 rmccabe
2006-10-31  0:16 rmccabe
2006-10-30 22:52 rmccabe
2006-10-30 20:43 jparsons
2006-10-27  1:11 rmccabe
2006-10-25  0:43 rmccabe
2006-10-24 14:08 rmccabe
2006-10-23 20:47 jparsons
2006-10-20 22:09 rmccabe
2006-10-20 21:59 rmccabe
2006-10-19 14:57 rmccabe
2006-10-18 23:12 rmccabe
2006-10-18 19:16 rmccabe
2006-10-16 21:01 rmccabe
2006-10-16 20:51 jparsons
2006-10-16 19:17 jparsons
2006-10-16  5:28 rmccabe
2006-10-16  4:54 rmccabe
2006-10-16  4:51 rmccabe
2006-10-13 22:56 rmccabe
2006-10-12 22:11 jparsons
2006-10-12 21:00 kupcevic
2006-10-12 20:54 jparsons
2006-10-12 20:48 jparsons
2006-10-12 19:40 rmccabe
2006-10-12 17:27 jparsons
2006-10-12 17:08 jparsons
2006-10-12 15:50 jparsons
2006-10-12 15:45 jparsons
2006-10-12  0:04 jparsons
2006-10-11 23:56 jparsons
2006-10-11 23:11 jparsons
2006-10-11 23:08 rmccabe
2006-10-11 22:37 jparsons
2006-10-11 20:58 jparsons
2006-10-11 17:43 jparsons
2006-10-11 17:29 rmccabe
2006-10-11 16:35 jparsons
2006-10-11 16:25 jparsons
2006-10-11 16:18 rmccabe
2006-10-10 21:33 kupcevic
2006-10-09 20:21 rmccabe
2006-10-04 16:20 rmccabe
2006-10-04 16:05 jparsons
2006-10-04 15:11 jparsons
2006-10-02 22:30 rmccabe
2006-10-02 21:42 rmccabe
2006-10-02 21:09 rmccabe
2006-10-02 20:53 rmccabe
2006-09-28 22:04 rmccabe
2006-09-28 20:10 rmccabe
2006-09-27 18:46 rmccabe
2006-09-27 16:18 jparsons
2006-09-27 15:51 jparsons
2006-09-27 15:35 jparsons
2006-09-25 22:59 rmccabe
2006-09-22 18:24 rmccabe
2006-08-30 22:59 rmccabe
2006-08-22 17:46 jparsons
2006-08-22 17:41 jparsons
2006-08-16 23:40 jparsons
2006-08-16 21:56 jparsons
2006-08-16 21:54 jparsons
2006-08-16 21:51 jparsons
2006-08-16 19:14 rmccabe
2006-08-16 16:10 jparsons
2006-08-14 15:12 jparsons
2006-08-13 19:38 jparsons
2006-08-13 19:37 jparsons
2006-08-13 18:36 jparsons
2006-08-13 16:32 jparsons
2006-08-13 16:15 jparsons
2006-08-13 15:02 jparsons
2006-08-13 14:57 jparsons
2006-08-13 13:48 jparsons
2006-08-12 21:13 jparsons
2006-08-12 20:31 jparsons
2006-08-12 18:22 jparsons
2006-08-12 17:53 jparsons
2006-08-11  0:29 jparsons
2006-08-10 23:06 shuennek
2006-08-10 16:50 jparsons
2006-08-10 14:16 jparsons
2006-08-09 22:05 jparsons
2006-08-09 21:48 jparsons
2006-08-03 13:37 jparsons
2006-08-02 18:59 rmccabe
2006-08-02 17:25 rmccabe
2006-08-01 15:29 jparsons
2006-08-01 15:25 jparsons
2006-08-01 15:20 jparsons
2006-08-01 15:13 jparsons
2006-08-01 15:04 jparsons
2006-07-31 18:21 rmccabe
2006-07-28 19:03 jparsons
2006-07-28 18:57 jparsons
2006-07-28 18:40 jparsons
2006-07-28 14:16 jparsons
2006-07-28 14:02 jparsons
2006-07-28 11:46 jparsons
2006-07-27 16:34 jparsons
2006-07-27 15:53 rmccabe
2006-07-25 20:16 jparsons
2006-07-25 20:01 jparsons
2006-07-25  0:56 jparsons
2006-07-24 21:51 jparsons
2006-07-24 21:13 jparsons
2006-07-24 19:50 jparsons
2006-07-19 22:28 rmccabe
2006-07-19 21:38 rmccabe
2006-07-19 20:57 rmccabe
2006-07-19 20:19 rmccabe
