All of lore.kernel.org
 help / color / mirror / Atom feed
* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-21 19:57 Dominick Grift
  2010-09-21 20:04 ` Jeremy Solt
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-21 19:57 UTC (permalink / raw)
  To: refpolicy


Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 6cc0049... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   40 ++++
 policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
 4 files changed, 638 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..d88b5ff
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,40 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode	--	gen_context(system_u:object_r:hadoop_datanode_initrc_lock_t,s0)
+/var/lock/subsys/hadoop-namenode	--	gen_context(system_u:object_r:hadoop_namenode_initrc_lock_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop								-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-datanode.pid	--	gen_context(system_u:object_r:hadoop_datanode_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-namenode.pid	--	gen_context(system_u:object_r:hadoop_namenode_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..6cc0049
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,247 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	corecmd_exec_all_executables(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..53a242b
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,347 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+# What or who runs this?
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_server_t)
+')
-- 
1.7.2.3

-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100921/31440ada/attachment.bin 

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21 19:57 [refpolicy] [PATCH] hadoop 1/10 -- unconfined Dominick Grift
@ 2010-09-21 20:04 ` Jeremy Solt
  2010-09-23 13:13   ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Jeremy Solt @ 2010-09-21 20:04 UTC (permalink / raw)
  To: refpolicy

On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
> Signed-off-by: Dominick Grift <domg472@gmail.com>
Thanks Dominick. 

Paul, are you going to include all of his changes in your patch set?

> ---
> :100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
> :000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
> :000000 100644 0000000... 6cc0049... A	policy/modules/services/hadoop.if
> :000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
>  policy/modules/kernel/corenetwork.te.in |    4 +
>  policy/modules/services/hadoop.fc       |   40 ++++
>  policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
>  policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
>  4 files changed, 638 insertions(+), 0 deletions(-)
> 


-- 
Jeremy J. Solt
Tresys Technology, LLC
410-290-1411 x122

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21 20:04 ` Jeremy Solt
@ 2010-09-23 13:13   ` Paul Nuzzi
  2010-09-24 14:20     ` Jeremy Solt
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-23 13:13 UTC (permalink / raw)
  To: refpolicy

On 09/21/2010 04:04 PM, Jeremy Solt wrote:
> On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
>> Signed-off-by: Dominick Grift <domg472@gmail.com>
> Thanks Dominick. 
> 
> Paul, are you going to include all of his changes in your patch set?

I guess it depends how we want to structure the patch.  Do we want to upstream 9 different
modules or one monolithic one?  

>> ---
>> :100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
>> :000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
>> :000000 100644 0000000... 6cc0049... A	policy/modules/services/hadoop.if
>> :000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
>>  policy/modules/kernel/corenetwork.te.in |    4 +
>>  policy/modules/services/hadoop.fc       |   40 ++++
>>  policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
>>  policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
>>  4 files changed, 638 insertions(+), 0 deletions(-)
>>
> 
> 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-23 13:13   ` Paul Nuzzi
@ 2010-09-24 14:20     ` Jeremy Solt
  2010-09-27 18:50       ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Jeremy Solt @ 2010-09-24 14:20 UTC (permalink / raw)
  To: refpolicy

On Thu, 2010-09-23 at 09:13 -0400, Paul Nuzzi wrote:
> On 09/21/2010 04:04 PM, Jeremy Solt wrote:
> > On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
> >> Signed-off-by: Dominick Grift <domg472@gmail.com>
> > Thanks Dominick. 
> > 
> > Paul, are you going to include all of his changes in your patch set?
> 
> I guess it depends how we want to structure the patch.  Do we want to upstream 9 different
> modules or one monolithic one?  
> 

Does it make sense to have any of these modules without the rest of
hadoop? I see that zookeeper is a subproject of hadoop. Could it be used
separately or is it only used with hadoop systems? If they're all
dependent on each other, then I think they should be in one module.


> >> ---
> >> :100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
> >> :000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
> >> :000000 100644 0000000... 6cc0049... A	policy/modules/services/hadoop.if
> >> :000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
> >>  policy/modules/kernel/corenetwork.te.in |    4 +
> >>  policy/modules/services/hadoop.fc       |   40 ++++
> >>  policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
> >>  policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
> >>  4 files changed, 638 insertions(+), 0 deletions(-)
> >>



-- 
Jeremy J. Solt
Tresys Technology, LLC
410-290-1411 x122

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-24 14:20     ` Jeremy Solt
@ 2010-09-27 18:50       ` Paul Nuzzi
  2010-09-30 19:39         ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-27 18:50 UTC (permalink / raw)
  To: refpolicy

On 09/24/2010 10:20 AM, Jeremy Solt wrote:
> On Thu, 2010-09-23 at 09:13 -0400, Paul Nuzzi wrote:
>> On 09/21/2010 04:04 PM, Jeremy Solt wrote:
>>> On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
>>>> Signed-off-by: Dominick Grift <domg472@gmail.com>
>>> Thanks Dominick. 
>>>
>>> Paul, are you going to include all of his changes in your patch set?
>>
>> I guess it depends how we want to structure the patch.  Do we want to upstream 9 different
>> modules or one monolithic one?  
>>
> 
> Does it make sense to have any of these modules without the rest of
> hadoop? I see that zookeeper is a subproject of hadoop. Could it be used
> separately or is it only used with hadoop systems? If they're all
> dependent on each other, then I think they should be in one module.
 
Keeping it all together is fine.  The module could be split if a sysadmin
decides to run HDFS without zookeeper.  Not a big deal. I will continue to 
port it to one monolithic patch.

>>>> ---
>>>> :100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
>>>> :000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
>>>> :000000 100644 0000000... 6cc0049... A	policy/modules/services/hadoop.if
>>>> :000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
>>>>  policy/modules/kernel/corenetwork.te.in |    4 +
>>>>  policy/modules/services/hadoop.fc       |   40 ++++
>>>>  policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
>>>>  policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
>>>>  4 files changed, 638 insertions(+), 0 deletions(-)
>>>>
> 
> 
> 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-27 18:50       ` Paul Nuzzi
@ 2010-09-30 19:39         ` Paul Nuzzi
  2010-10-01 12:02           ` Dominick Grift
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-30 19:39 UTC (permalink / raw)
  To: refpolicy

I updated the patch based on recommendations from the mailing list.
All of hadoop's services are included in one module instead of 
individual ones.  Unconfined and sysadm roles are given access to 
hadoop and zookeeper client domain transitions. The services are started
using run_init.  Let me know what you think.

 
Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

---
 policy/modules/kernel/corenetwork.te.in |    4 
 policy/modules/roles/sysadm.te          |    8 
 policy/modules/services/hadoop.fc       |   53 ++++
 policy/modules/services/hadoop.if       |  336 +++++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  367 ++++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 
 6 files changed, 776 insertions(+)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index 2a19751..7954580 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -397,6 +401,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5bdd554
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,53 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid		--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid		--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..0e5bb28
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,336 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+	type_transition hadoop_$1_t hadoop_var_lib_t:file hadoop_$1_var_lib_t;
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+	type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	consoletype_exec(hadoop_$1_initrc_t)
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+	files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
+	files_manage_generic_tmp_files(hadoop_$1_initrc_t)
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	hadoop_rx_etc(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+	allow hadoop_$1_t self:fifo_file { read write getattr ioctl };
+	allow hadoop_$1_t self:process execmem;
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+	files_manage_generic_tmp_files(hadoop_$1_t)
+	files_manage_generic_tmp_dirs(hadoop_$1_t)
+	files_read_etc_files(hadoop_$1_t)
+	files_read_var_lib_files(hadoop_$1_t)
+	files_search_pids(hadoop_$1_t)
+
+	hadoop_rx_etc(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	logging_search_logs(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	miscfiles_read_localization(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+
+	sysnet_read_config(hadoop_$1_t)
+
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##  Give permission to a domain to access hadoop_etc_t
+## </summary>
+## <param name="domain">
+##  <summary>
+##  Domain needing read and execute permission
+##  </summary>
+## </param>
+#
+interface(`hadoop_rx_etc', `
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file { read getattr };
+	allow $1 hadoop_etc_t:file { read_file_perms execute execute_no_trans};
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..1a573ea
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,367 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
+files_manage_generic_tmp_dirs(hadoop_t)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+fs_associate(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+files_manage_generic_tmp_dirs(zookeeper_t)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+files_manage_generic_tmp_files(zookeeper_server_t)
+files_manage_generic_tmp_dirs(zookeeper_server_t)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..ac27264 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-30 19:39         ` Paul Nuzzi
@ 2010-10-01 12:02           ` Dominick Grift
  2010-10-01 15:17             ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-10-01 12:02 UTC (permalink / raw)
  To: refpolicy

On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
> I updated the patch based on recommendations from the mailing list.
> All of hadoop's services are included in one module instead of 
> individual ones.  Unconfined and sysadm roles are given access to 
> hadoop and zookeeper client domain transitions. The services are started
> using run_init.  Let me know what you think.

Why do some hadoop domains need to manage generic tmp?

files_manage_generic_tmp_dirs(zookeeper_t)
files_manage_generic_tmp_dirs(hadoop_t)
files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
files_manage_generic_tmp_files(hadoop_$1_initrc_t)
files_manage_generic_tmp_files(hadoop_$1_t)
files_manage_generic_tmp_dirs(hadoop_$1_t)

You probably need:

files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
because it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.

Can use rw_fifo_file_perms here:

allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };

Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.

hadoop_rx_etc(hadoop_$1_initrc_t)

This seems wrong. Why does it need that? Use files_search_var_lib() if possible:

files_read_var_lib_files(hadoop_$1_t)

This is not a declaration; you might want to use filetrans_pattern() instead:

type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;

Other then the above, there are some style issues:

http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide

But I can help clean that up once the above issues are resolved.


> 
>  
> Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>
> 
> ---
>  policy/modules/kernel/corenetwork.te.in |    4 
>  policy/modules/roles/sysadm.te          |    8 
>  policy/modules/services/hadoop.fc       |   53 ++++
>  policy/modules/services/hadoop.if       |  336 +++++++++++++++++++++++++++++
>  policy/modules/services/hadoop.te       |  367 ++++++++++++++++++++++++++++++++
>  policy/modules/system/unconfined.te     |    8 
>  6 files changed, 776 insertions(+)
> 
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..7a1b5de 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
>  network_port(git, tcp,9418,s0, udp,9418,s0)
>  network_port(gopher, tcp,70,s0, udp,70,s0)
>  network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
>  network_port(hddtemp, tcp,7634,s0)
>  network_port(howl, tcp,5335,s0, udp,5353,s0)
>  network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
>  network_port(xen, tcp,8002,s0)
>  network_port(xfs, tcp,7100,s0)
>  network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
>  network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
>  network_port(zope, tcp,8021,s0)
>  
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index 2a19751..7954580 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
>  ')
>  
>  optional_policy(`
> +	hadoop_run(sysadm_t, sysadm_r)
> +')
> +
> +optional_policy(`
>  	# allow system administrator to use the ipsec script to look
>  	# at things (e.g., ipsec auto --status)
>  	# probably should create an ipsec_admin role for this kind of thing
> @@ -397,6 +401,10 @@ optional_policy(`
>  	yam_run(sysadm_t, sysadm_r)
>  ')
>  
> +optional_policy(`
> +	zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
>  ifndef(`distro_redhat',`
>  	optional_policy(`
>  		auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..5bdd554
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,53 @@
> +/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid		--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid		--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..0e5bb28
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,336 @@
> +## <summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +## <summary>
> +##	The template to define a hadoop domain.
> +## </summary>
> +## <param name="domain_prefix">
> +##	<summary>
> +##	Domain prefix to be used.
> +##	</summary>
> +## </param>
> +#
> +template(`hadoop_domain_template',`
> +	gen_require(`
> +		attribute hadoop_domain;
> +		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> +		type hadoop_exec_t;
> +	')
> +
> +	########################################
> +	#
> +	# Shared declarations.
> +	#
> +
> +	type hadoop_$1_t, hadoop_domain;
> +	domain_type(hadoop_$1_t)
> +	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> +	type hadoop_$1_initrc_t;
> +	type hadoop_$1_initrc_exec_t;
> +	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> +	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> +	type hadoop_$1_lock_t;
> +	files_lock_file(hadoop_$1_lock_t)
> +	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> +	type hadoop_$1_log_t;
> +	logging_log_file(hadoop_$1_log_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> +	type hadoop_$1_var_lib_t;
> +	files_type(hadoop_$1_var_lib_t)
> +	type_transition hadoop_$1_t hadoop_var_lib_t:file hadoop_$1_var_lib_t;
> +
> +	type hadoop_$1_initrc_var_run_t;
> +	files_pid_file(hadoop_$1_initrc_var_run_t)
> +	type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
> +
> +	type hadoop_$1_tmp_t;
> +	files_tmp_file(hadoop_$1_tmp_t)
> +	files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 initrc policy.
> +	#
> +
> +	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> +	allow hadoop_$1_initrc_t self:capability { setuid setgid };
> +	allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
> +	allow hadoop_$1_initrc_t self:process setsched;
> +
> +	consoletype_exec(hadoop_$1_initrc_t)
> +	corecmd_exec_bin(hadoop_$1_initrc_t)
> +	corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> +	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> +	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> +	files_read_etc_files(hadoop_$1_initrc_t)
> +	files_read_usr_files(hadoop_$1_initrc_t)
> +	files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> +	files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> +	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> +	hadoop_rx_etc(hadoop_$1_initrc_t)
> +
> +	init_rw_utmp(hadoop_$1_initrc_t)
> +	init_use_script_ptys(hadoop_$1_initrc_t)
> +
> +	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> +	kernel_read_sysctl(hadoop_$1_initrc_t)
> +	kernel_read_system_state(hadoop_$1_initrc_t)
> +
> +	logging_send_syslog_msg(hadoop_$1_initrc_t)
> +	logging_send_audit_msgs(hadoop_$1_initrc_t)
> +	logging_search_logs(hadoop_$1_initrc_t)
> +
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +	miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_initrc_t)
> +	')
> +
> +	term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> +	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> +	# This can be removed on anything post-el5
> +	libs_use_ld_so(hadoop_$1_initrc_t)
> +	libs_use_shared_libs(hadoop_$1_initrc_t)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 policy.
> +	#
> +
> +	allow hadoop_$1_t hadoop_domain:process signull;
> +	allow hadoop_$1_t self:fifo_file { read write getattr ioctl };
> +	allow hadoop_$1_t self:process execmem;
> +	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> +	corecmd_exec_bin(hadoop_$1_t)
> +	corecmd_exec_shell(hadoop_$1_t)
> +
> +	dev_read_rand(hadoop_$1_t)
> +	dev_read_urand(hadoop_$1_t)
> +	dev_read_sysfs(hadoop_$1_t)
> +	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +	files_manage_generic_tmp_files(hadoop_$1_t)
> +	files_manage_generic_tmp_dirs(hadoop_$1_t)
> +	files_read_etc_files(hadoop_$1_t)
> +	files_read_var_lib_files(hadoop_$1_t)
> +	files_search_pids(hadoop_$1_t)
> +
> +	hadoop_rx_etc(hadoop_$1_t)
> +
> +	java_exec(hadoop_$1_t)
> +
> +	kernel_read_network_state(hadoop_$1_t)
> +	kernel_read_system_state(hadoop_$1_t)
> +
> +	logging_search_logs(hadoop_$1_t)
> +
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> +	miscfiles_read_localization(hadoop_$1_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_t)
> +	')
> +
> +	sysnet_read_config(hadoop_$1_t)
> +
> +	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> +	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> +	corenet_all_recvfrom_netlabel(hadoop_$1_t)
> +	corenet_tcp_bind_all_nodes(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> +	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> +	corenet_tcp_connect_generic_port(hadoop_$1_t)
> +
> +	allow hadoop_$1_t self:udp_socket create_socket_perms;
> +	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> +	corenet_udp_bind_all_nodes(hadoop_$1_t)
> +
> +	# This can be removed on anything post-el5
> +	libs_use_ld_so(hadoop_$1_t)
> +	libs_use_shared_libs(hadoop_$1_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute hadoop in the
> +##	hadoop domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_domtrans',`
> +	gen_require(`
> +		type hadoop_t, hadoop_exec_t;
> +	')
> +
> +	files_search_usr($1)
> +	libs_search_lib($1)
> +	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute hadoop in the hadoop domain,
> +##	and allow the specified role the
> +##	hadoop domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +## <param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`hadoop_run',`
> +	gen_require(`
> +		type hadoop_t;
> +	')
> +
> +	hadoop_domtrans($1)
> +	role $2 types hadoop_t;
> +
> +	allow $1 hadoop_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_client',`
> +	gen_require(`
> +		type zookeeper_t, zookeeper_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper server in the
> +##	zookeeper server domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_t, zookeeper_server_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper server in the
> +##	zookeeper domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`zookeeper_initrc_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_initrc_exec_t;
> +	')
> +
> +	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain, and allow the
> +##	specified role the zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +## <param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`zookeeper_run_client',`
> +	gen_require(`
> +		type zookeeper_t;
> +	')
> +
> +	zookeeper_domtrans_client($1)
> +	role $2 types zookeeper_t;
> +
> +	allow $1 zookeeper_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +##  Give permission to a domain to access hadoop_etc_t
> +## </summary>
> +## <param name="domain">
> +##  <summary>
> +##  Domain needing read and execute permission
> +##  </summary>
> +## </param>
> +#
> +interface(`hadoop_rx_etc', `
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	allow $1 hadoop_etc_t:dir search_dir_perms;
> +	allow $1 hadoop_etc_t:lnk_file { read getattr };
> +	allow $1 hadoop_etc_t:file { read_file_perms execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..1a573ea
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,367 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
> +files_manage_generic_tmp_dirs(hadoop_t)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +corenet_tcp_connect_generic_port(hadoop_t)
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(hadoop_t)
> +libs_use_shared_libs(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> +	nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> +	nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +fs_associate(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
> +files_manage_generic_tmp_dirs(zookeeper_t)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +corenet_tcp_connect_generic_port(zookeeper_t)
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_t)
> +libs_use_shared_libs(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> +	nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
> +files_manage_generic_tmp_files(zookeeper_server_t)
> +files_manage_generic_tmp_dirs(zookeeper_server_t)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_server_t)
> +libs_use_shared_libs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..ac27264 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
>  ')
>  
>  optional_policy(`
> +	hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
>  	inn_domtrans(unconfined_t)
>  ')
>  
> @@ -210,6 +214,10 @@ optional_policy(`
>  	xserver_domtrans(unconfined_t)
>  ')
>  
> +optional_policy(`
> +	zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
>  ########################################
>  #
>  # Unconfined Execmem Local policy
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20101001/232fbbe6/attachment-0001.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-01 12:02           ` Dominick Grift
@ 2010-10-01 15:17             ` Paul Nuzzi
  2010-10-01 17:56               ` Christopher J. PeBenito
  2010-10-01 18:01               ` Dominick Grift
  0 siblings, 2 replies; 37+ messages in thread
From: Paul Nuzzi @ 2010-10-01 15:17 UTC (permalink / raw)
  To: refpolicy

On 10/01/2010 08:02 AM, Dominick Grift wrote:
> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>> I updated the patch based on recommendations from the mailing list.
>> All of hadoop's services are included in one module instead of 
>> individual ones.  Unconfined and sysadm roles are given access to 
>> hadoop and zookeeper client domain transitions. The services are started
>> using run_init.  Let me know what you think.
> 
> Why do some hadoop domain need to manage generic tmp?
> 
> files_manage_generic_tmp_dirs(zookeeper_t)
> files_manage_generic_tmp_dirs(hadoop_t)
> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> files_manage_generic_tmp_files(hadoop_$1_t)
> files_manage_generic_tmp_dirs(hadoop_$1_t)

This has to be done for Java JMX to work.  All of the files are written to
/tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
all the files for each service are labeled with hadoop_*_tmp_t.  The first service 
will end up owning the directory if it is not labeled tmp_t.

> You probably need:
> 
> files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
> because it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.

> Can use rw_fifo_file_perms here:
> 
> allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
> 
> Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.
> 
> hadoop_rx_etc(hadoop_$1_initrc_t)
> 
> This seems wrong. Why does it need that? use files_search_var_lib() if possible:
> 
> files_read_var_lib_files(hadoop_$1_t)
> 
> This is not a declaration and might want to use filetrans_pattern() instead:
> 
> type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;

Changed.  Thanks for the comments.
 
> Other then the above, there are some style issues:
> 
> http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide
> 
> But i can help clean that up once above issues are resolved.
> 

Is there a style checking script for refpolicy patches similar to the Linux kernel?

 
Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

---
 policy/modules/kernel/corenetwork.te.in |    4 
 policy/modules/roles/sysadm.te          |    8 
 policy/modules/services/hadoop.fc       |   53 ++++
 policy/modules/services/hadoop.if       |  360 ++++++++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  360 ++++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 
 6 files changed, 793 insertions(+)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..b46b28b 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5bdd554
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,53 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid		--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid		--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..051e68c
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,360 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	consoletype_exec(hadoop_$1_initrc_t)
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+	files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
+	files_manage_generic_tmp_files(hadoop_$1_initrc_t)
+	files_search_pids(hadoop_$1_initrc_t)
+	files_search_locks(hadoop_$1_initrc_t)
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	hadoop_exec_config_files(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:process execmem;
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+	files_manage_generic_tmp_files(hadoop_$1_t)
+	files_manage_generic_tmp_dirs(hadoop_$1_t)
+	files_read_etc_files(hadoop_$1_t)
+	files_search_pids(hadoop_$1_t)
+	files_search_var_lib(hadoop_$1_t)
+
+	hadoop_exec_config_files(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	logging_search_logs(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	miscfiles_read_localization(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+
+	sysnet_read_config(hadoop_$1_t)
+
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##  Give permission to a domain to read
+##  hadoop_etc_t
+## </summary>
+## <param name="domain">
+##  <summary>
+##  Domain needing read permission
+##  </summary>
+## </param>
+#
+interface(`hadoop_read_config_files', `
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file { read getattr };
+	allow $1 hadoop_etc_t:file read_file_perms;
+')
+
+########################################
+## <summary>
+##  Give permission to a domain to
+##  execute hadoop_etc_t
+## </summary>
+## <param name="domain">
+##  <summary>
+##  Domain needing read and execute
+##  permission
+##  </summary>
+## </param>
+#
+interface(`hadoop_exec_config_files', `
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	hadoop_read_config_files($1)
+	allow $1 hadoop_etc_t:file { execute execute_no_trans};
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..6a66962
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,360 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
+files_manage_generic_tmp_dirs(hadoop_t)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+fs_associate(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+files_manage_generic_tmp_dirs(zookeeper_t)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+files_manage_generic_tmp_files(zookeeper_server_t)
+files_manage_generic_tmp_dirs(zookeeper_server_t)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..ac27264 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-01 15:17             ` Paul Nuzzi
@ 2010-10-01 17:56               ` Christopher J. PeBenito
  2010-10-04 17:15                 ` Paul Nuzzi
  2010-10-01 18:01               ` Dominick Grift
  1 sibling, 1 reply; 37+ messages in thread
From: Christopher J. PeBenito @ 2010-10-01 17:56 UTC (permalink / raw)
  To: refpolicy

On 10/01/10 11:17, Paul Nuzzi wrote:
> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>> I updated the patch based on recommendations from the mailing list.
>>> All of hadoop's services are included in one module instead of
>>> individual ones.  Unconfined and sysadm roles are given access to
>>> hadoop and zookeeper client domain transitions. The services are started
>>> using run_init.  Let me know what you think.
>>
>> Why do some hadoop domain need to manage generic tmp?
>>
>> files_manage_generic_tmp_dirs(zookeeper_t)
>> files_manage_generic_tmp_dirs(hadoop_t)
>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>> files_manage_generic_tmp_files(hadoop_$1_t)
>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>
> This has to be done for Java JMX to work.  All of the files are written to
> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
> all the files for each service are labeled with hadoop_*_tmp_t.  The first service
> will end up owning the directory if it is not labeled tmp_t.

The hsperfdata dir in /tmp is certainly the bane of policy writers.  Based 
on a quick look through the policy, it looks like the only dir they 
create in /tmp is this hsperfdata dir.  I suggest you do something like

files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)

filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)

-- 
Chris PeBenito
Tresys Technology, LLC
www.tresys.com | oss.tresys.com

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-01 15:17             ` Paul Nuzzi
  2010-10-01 17:56               ` Christopher J. PeBenito
@ 2010-10-01 18:01               ` Dominick Grift
  2010-10-01 19:06                 ` Paul Nuzzi
  1 sibling, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-10-01 18:01 UTC (permalink / raw)
  To: refpolicy

On Fri, Oct 01, 2010 at 11:17:27AM -0400, Paul Nuzzi wrote:
> On 10/01/2010 08:02 AM, Dominick Grift wrote:
> > On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
> >> I updated the patch based on recommendations from the mailing list.
> >> All of hadoop's services are included in one module instead of 
> >> individual ones.  Unconfined and sysadm roles are given access to 
> >> hadoop and zookeeper client domain transitions. The services are started
> >> using run_init.  Let me know what you think.
> > 
> > Why do some hadoop domain need to manage generic tmp?
> > 
> > files_manage_generic_tmp_dirs(zookeeper_t)
> > files_manage_generic_tmp_dirs(hadoop_t)
> > files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> > files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> > files_manage_generic_tmp_files(hadoop_$1_t)
> > files_manage_generic_tmp_dirs(hadoop_$1_t)
> 
> This has to be done for Java JMX to work.  All of the files are written to
> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
> all the files for each service are labeled with hadoop_*_tmp_t.  The first service 
> will end up owning the directory if it is not labeled tmp_t.
> 
> > You probably need:
> > 
> > files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
> > becuase it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.
> 
> > Can use rw_fifo_file_perms here:
> > 
> > allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
> > 
> > Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.
> > 
> > hadoop_rx_etc(hadoop_$1_initrc_t)
> > 
> > This seems wrong. Why does it need that? use files_search_var_lib() if possible:
> > 
> > files_read_var_lib_files(hadoop_$1_t)
> > 
> > This is not a declaration and might want to use filetrans_pattern() instead:
> > 
> > type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
> 
> Changed.  Thanks for the comments.
>  
> > Other then the above, there are some style issues:
> > 
> > http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide
> > 
> > But i can help clean that up once above issues are resolved.
> > 
> 
> Is there a style checking script for refpolicy patches similar to the Linux kernel?

Not that I am aware of.
Are you sure that your entries in hadoop.fc work? You could check by intentionally mislabeling the paths and children with chcon and then seeing if restorecon restores everything properly.

> 
>  
> Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>
> 
> ---
>  policy/modules/kernel/corenetwork.te.in |    4 
>  policy/modules/roles/sysadm.te          |    8 
>  policy/modules/services/hadoop.fc       |   53 ++++
>  policy/modules/services/hadoop.if       |  360 ++++++++++++++++++++++++++++++++
>  policy/modules/services/hadoop.te       |  360 ++++++++++++++++++++++++++++++++
>  policy/modules/system/unconfined.te     |    8 
>  6 files changed, 793 insertions(+)
> 
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..7a1b5de 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
>  network_port(git, tcp,9418,s0, udp,9418,s0)
>  network_port(gopher, tcp,70,s0, udp,70,s0)
>  network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
>  network_port(hddtemp, tcp,7634,s0)
>  network_port(howl, tcp,5335,s0, udp,5353,s0)
>  network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
>  network_port(xen, tcp,8002,s0)
>  network_port(xfs, tcp,7100,s0)
>  network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
>  network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
>  network_port(zope, tcp,8021,s0)
>  
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index cad05ff..b46b28b 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
>  ')
>  
>  optional_policy(`
> +	hadoop_run(sysadm_t, sysadm_r)
> +')
> +
> +optional_policy(`
>  	# allow system administrator to use the ipsec script to look
>  	# at things (e.g., ipsec auto --status)
>  	# probably should create an ipsec_admin role for this kind of thing
> @@ -392,6 +396,10 @@ optional_policy(`
>  	yam_run(sysadm_t, sysadm_r)
>  ')
>  
> +optional_policy(`
> +	zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
>  ifndef(`distro_redhat',`
>  	optional_policy(`
>  		auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..5bdd554
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,53 @@
> +/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid		--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid		--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..051e68c
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,360 @@
> +## <summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +## <summary>
> +##	The template to define a hadoop domain.
> +## </summary>
> +## <param name="domain_prefix">
> +##	<summary>
> +##	Domain prefix to be used.
> +##	</summary>
> +## </param>
> +#
> +template(`hadoop_domain_template',`
> +	gen_require(`
> +		attribute hadoop_domain;
> +		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> +		type hadoop_exec_t;
> +	')
> +
> +	########################################
> +	#
> +	# Shared declarations.
> +	#
> +
> +	type hadoop_$1_t, hadoop_domain;
> +	domain_type(hadoop_$1_t)
> +	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> +	type hadoop_$1_initrc_t;
> +	type hadoop_$1_initrc_exec_t;
> +	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> +	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> +	type hadoop_$1_lock_t;
> +	files_lock_file(hadoop_$1_lock_t)
> +	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> +	type hadoop_$1_log_t;
> +	logging_log_file(hadoop_$1_log_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> +	type hadoop_$1_var_lib_t;
> +	files_type(hadoop_$1_var_lib_t)
> +	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
> +
> +	type hadoop_$1_initrc_var_run_t;
> +	files_pid_file(hadoop_$1_initrc_var_run_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> +
> +	type hadoop_$1_tmp_t;
> +	files_tmp_file(hadoop_$1_tmp_t)
> +	files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 initrc policy.
> +	#
> +
> +	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> +	allow hadoop_$1_initrc_t self:capability { setuid setgid };
> +	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_initrc_t self:process setsched;
> +
> +	consoletype_exec(hadoop_$1_initrc_t)
> +	corecmd_exec_bin(hadoop_$1_initrc_t)
> +	corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> +	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> +	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> +	files_read_etc_files(hadoop_$1_initrc_t)
> +	files_read_usr_files(hadoop_$1_initrc_t)
> +	files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> +	files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> +	files_search_pids(hadoop_$1_initrc_t)
> +	files_search_locks(hadoop_$1_initrc_t)
> +	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> +	hadoop_exec_config_files(hadoop_$1_initrc_t)
> +
> +	init_rw_utmp(hadoop_$1_initrc_t)
> +	init_use_script_ptys(hadoop_$1_initrc_t)
> +
> +	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> +	kernel_read_sysctl(hadoop_$1_initrc_t)
> +	kernel_read_system_state(hadoop_$1_initrc_t)
> +
> +	logging_send_syslog_msg(hadoop_$1_initrc_t)
> +	logging_send_audit_msgs(hadoop_$1_initrc_t)
> +	logging_search_logs(hadoop_$1_initrc_t)
> +
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +	miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_initrc_t)
> +	')
> +
> +	term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> +	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> +	# This can be removed on anything post-el5
> +	libs_use_ld_so(hadoop_$1_initrc_t)
> +	libs_use_shared_libs(hadoop_$1_initrc_t)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 policy.
> +	#
> +
> +	allow hadoop_$1_t hadoop_domain:process signull;
> +	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_t self:process execmem;
> +	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> +	corecmd_exec_bin(hadoop_$1_t)
> +	corecmd_exec_shell(hadoop_$1_t)
> +
> +	dev_read_rand(hadoop_$1_t)
> +	dev_read_urand(hadoop_$1_t)
> +	dev_read_sysfs(hadoop_$1_t)
> +	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +	files_manage_generic_tmp_files(hadoop_$1_t)
> +	files_manage_generic_tmp_dirs(hadoop_$1_t)
> +	files_read_etc_files(hadoop_$1_t)
> +	files_search_pids(hadoop_$1_t)
> +	files_search_var_lib(hadoop_$1_t)
> +
> +	hadoop_exec_config_files(hadoop_$1_t)
> +
> +	java_exec(hadoop_$1_t)
> +
> +	kernel_read_network_state(hadoop_$1_t)
> +	kernel_read_system_state(hadoop_$1_t)
> +
> +	logging_search_logs(hadoop_$1_t)
> +
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> +	miscfiles_read_localization(hadoop_$1_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_t)
> +	')
> +
> +	sysnet_read_config(hadoop_$1_t)
> +
> +	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> +	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> +	corenet_all_recvfrom_netlabel(hadoop_$1_t)
> +	corenet_tcp_bind_all_nodes(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> +	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> +	corenet_tcp_connect_generic_port(hadoop_$1_t)
> +
> +	allow hadoop_$1_t self:udp_socket create_socket_perms;
> +	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> +	corenet_udp_bind_all_nodes(hadoop_$1_t)
> +
> +	# This can be removed on anything post-el5
> +	libs_use_ld_so(hadoop_$1_t)
> +	libs_use_shared_libs(hadoop_$1_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute hadoop in the
> +##	hadoop domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_domtrans',`
> +	gen_require(`
> +		type hadoop_t, hadoop_exec_t;
> +	')
> +
> +	files_search_usr($1)
> +	libs_search_lib($1)
> +	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute hadoop in the hadoop domain,
> +##	and allow the specified role the
> +##	hadoop domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +## <param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`hadoop_run',`
> +	gen_require(`
> +		type hadoop_t;
> +	')
> +
> +	hadoop_domtrans($1)
> +	role $2 types hadoop_t;
> +
> +	allow $1 hadoop_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_client',`
> +	gen_require(`
> +		type zookeeper_t, zookeeper_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper server in the
> +##	zookeeper server domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_t, zookeeper_server_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper server in the
> +##	zookeeper domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`zookeeper_initrc_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_initrc_exec_t;
> +	')
> +
> +	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain, and allow the
> +##	specified role the zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +## <param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`zookeeper_run_client',`
> +	gen_require(`
> +		type zookeeper_t;
> +	')
> +
> +	zookeeper_domtrans_client($1)
> +	role $2 types zookeeper_t;
> +
> +	allow $1 zookeeper_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +##  Give permission to a domain to read
> +##  hadoop_etc_t
> +## </summary>
> +## <param name="domain">
> +##  <summary>
> +##  Domain needing read permission
> +##  </summary>
> +## </param>
> +#
> +interface(`hadoop_read_config_files', `
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	allow $1 hadoop_etc_t:dir search_dir_perms;
> +	allow $1 hadoop_etc_t:lnk_file { read getattr };
> +	allow $1 hadoop_etc_t:file read_file_perms;
> +')
> +
> +########################################
> +## <summary>
> +##  Give permission to a domain to
> +##  execute hadoop_etc_t
> +## </summary>
> +## <param name="domain">
> +##  <summary>
> +##  Domain needing read and execute
> +##  permission
> +##  </summary>
> +## </param>
> +#
> +interface(`hadoop_exec_config_files', `
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	hadoop_read_config_files($1)
> +	allow $1 hadoop_etc_t:file { execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..6a66962
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,360 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
> +files_manage_generic_tmp_dirs(hadoop_t)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +corenet_tcp_connect_generic_port(hadoop_t)
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(hadoop_t)
> +libs_use_shared_libs(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> +	nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> +	nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +fs_associate(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
> +files_manage_generic_tmp_dirs(zookeeper_t)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +corenet_tcp_connect_generic_port(zookeeper_t)
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_t)
> +libs_use_shared_libs(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> +	nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
> +files_manage_generic_tmp_files(zookeeper_server_t)
> +files_manage_generic_tmp_dirs(zookeeper_server_t)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_server_t)
> +libs_use_shared_libs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..ac27264 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
>  ')
>  
>  optional_policy(`
> +	hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
>  	inn_domtrans(unconfined_t)
>  ')
>  
> @@ -210,6 +214,10 @@ optional_policy(`
>  	xserver_domtrans(unconfined_t)
>  ')
>  
> +optional_policy(`
> +	zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
>  ########################################
>  #
>  # Unconfined Execmem Local policy
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20101001/97dd83e6/attachment-0001.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-01 18:01               ` Dominick Grift
@ 2010-10-01 19:06                 ` Paul Nuzzi
  0 siblings, 0 replies; 37+ messages in thread
From: Paul Nuzzi @ 2010-10-01 19:06 UTC (permalink / raw)
  To: refpolicy

On 10/01/2010 02:01 PM, Dominick Grift wrote:
> On Fri, Oct 01, 2010 at 11:17:27AM -0400, Paul Nuzzi wrote:
>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>> I updated the patch based on recommendations from the mailing list.
>>>> All of hadoop's services are included in one module instead of 
>>>> individual ones.  Unconfined and sysadm roles are given access to 
>>>> hadoop and zookeeper client domain transitions. The services are started
>>>> using run_init.  Let me know what you think.
>>>
>>> Why do some hadoop domain need to manage generic tmp?
>>>
>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>> files_manage_generic_tmp_dirs(hadoop_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>
>> This has to be done for Java JMX to work.  All of the files are written to
>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>> all the files for each service are labeled with hadoop_*_tmp_t.  The first service 
>> will end up owning the directory if it is not labeled tmp_t.
>>
>>> You probably need:
>>>
>>> files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
>>> because it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.
>>
>>> Can use rw_fifo_file_perms here:
>>>
>>> allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
>>>
>>> Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.
>>>
>>> hadoop_rx_etc(hadoop_$1_initrc_t)
>>>
>>> This seems wrong. Why does it need that? use files_search_var_lib() if possible:
>>>
>>> files_read_var_lib_files(hadoop_$1_t)
>>>
>>> This is not a declaration and might want to use filetrans_pattern() instead:
>>>
>>> type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
>>
>> Changed.  Thanks for the comments.
>>  
>>> Other then the above, there are some style issues:
>>>
>>> http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide
>>>
>>> But i can help clean that up once above issues are resolved.
>>>
>>
>> Is there a style checking script for refpolicy patches similar to the Linux kernel?
> 
> Not that i am aware of.
> Are you sure that your entries in hadoop.fc work? You could check by intentionally mislabeling the paths and children with chcon and then seeing whether restorecon restores everything properly

Based on testing the paths get labelled correctly with restorecon. I am having an issue with the 
kernel not labelling files and directories correctly because of wildcards in /var/lib/hadoop. 
I gave the services enough permission to relabel what they needed during runtime. I didn't want 
to hard code the directory names because the policy would lose version independence.

>>
>>  
>> Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-01 17:56               ` Christopher J. PeBenito
@ 2010-10-04 17:15                 ` Paul Nuzzi
  2010-10-04 18:18                   ` Christopher J. PeBenito
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-10-04 17:15 UTC (permalink / raw)
  To: refpolicy

On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
> On 10/01/10 11:17, Paul Nuzzi wrote:
>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>> I updated the patch based on recommendations from the mailing list.
>>>> All of hadoop's services are included in one module instead of
>>>> individual ones.  Unconfined and sysadm roles are given access to
>>>> hadoop and zookeeper client domain transitions. The services are started
>>>> using run_init.  Let me know what you think.
>>>
>>> Why do some hadoop domain need to manage generic tmp?
>>>
>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>> files_manage_generic_tmp_dirs(hadoop_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>
>> This has to be done for Java JMX to work.  All of the files are written to
>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>> all the files for each service are labeled with hadoop_*_tmp_t.  The first service
>> will end up owning the directory if it is not labeled tmp_t.
> 
> The hsperfdata dir in /tmp certainly the bane of policy writers.  Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir.  I suggest you do something like
> 
> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
> 
> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
> 

That looks like a better way to handle the tmp_t problem.  

I changed the patch with your comments.  Hopefully this will be one of the last updates.  
Tested on a CDH3 cluster as a module without any problems.  

Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

---
 policy/modules/kernel/corenetwork.te.in |    4 
 policy/modules/roles/sysadm.te          |    8 
 policy/modules/services/hadoop.fc       |   54 ++++
 policy/modules/services/hadoop.if       |  358 ++++++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  380 ++++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 
 6 files changed, 812 insertions(+)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp,8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
+network_port(zookeeper_client, tcp,2181,s0)
+network_port(zookeeper_election, tcp,3888,s0)
+network_port(zookeeper_leader, tcp,2888,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..b46b28b 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..15c61ed
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,54 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid	--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid	--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..33108a3
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,358 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t, hadoop_hsperfdata_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	# both the init script domain and the service domain run in system_r
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+
+	# JVM JMX instrumentation writes to /tmp/hsperfdata_<user>: the directory
+	# keeps the shared hadoop_hsperfdata_t label, while files created inside
+	# it transition to the per-service hadoop_$1_tmp_t label.
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	consoletype_exec(hadoop_$1_initrc_t)
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+	files_search_pids(hadoop_$1_initrc_t)
+	files_search_locks(hadoop_$1_initrc_t)
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	hadoop_exec_config_files(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	# signull: hadoop services may check each other for liveness
+	allow hadoop_$1_t hadoop_domain:process signull;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:process execmem;
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+	files_read_etc_files(hadoop_$1_t)
+	files_search_pids(hadoop_$1_t)
+	files_search_var_lib(hadoop_$1_t)
+
+	hadoop_exec_config_files(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	logging_search_logs(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	miscfiles_read_localization(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+
+	sysnet_read_config(hadoop_$1_t)
+
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	# hadoop_exec_t lives under /usr/lib/hadoop*/bin (see hadoop.fc)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	# the caller may inspect, ptrace and signal its hadoop client processes
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	# zookeeper_exec_t is /usr/bin/zookeeper-client (see hadoop.fc)
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	# zookeeper_server_exec_t is /usr/bin/zookeeper-server (see hadoop.fc)
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server init scripts
+##	in the init script domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	# the caller may inspect, ptrace and signal its zookeeper client processes
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Read hadoop configuration
+##	files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_read_config_files', `
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
+	allow $1 hadoop_etc_t:file read_file_perms;
+')
+
+########################################
+## <summary>
+##	Read and execute hadoop
+##	configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed read and
+##	execute access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_config_files', `
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	hadoop_read_config_files($1)
+	allow $1 hadoop_etc_t:file { execute execute_no_trans };
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..519aebb
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,380 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+# Attribute carried by every per-service hadoop daemon domain
+# created by hadoop_domain_template() below.
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+# /etc/hadoop.* configuration files.
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+# /var/lib/hadoop.* state.
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+# /var/log/hadoop.* log files.
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+# /var/run/hadoop.* pid directory.
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+# Shared label for the JVM's /tmp/hsperfdata_* directories,
+# used by all hadoop and zookeeper domains.
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+# One hadoop_<service>_t domain (plus initrc types) per service.
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+# /etc/zookeeper(\.dist)? configuration, shared with the server domain.
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+# /var/log/zookeeper log files, shared with the server domain.
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+# /var/zookeeper and /var/lib/zookeeper state.
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+# NOTE(review): self:key write with no read/search looks unusual -- confirm.
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+# The client tool signal-checks the per-service daemon domains.
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+# Configuration files are also executed in this domain.
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+# JVM JMX support: create /tmp/hsperfdata_* as hadoop_hsperfdata_t,
+# with the files inside labeled hadoop_tmp_t.
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+# NOTE(review): the zope port (tcp 8021) appears to be reused here for
+# hadoop traffic -- confirm this overlap is intended.
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+# NOTE(review): reading all files is very broad and weakens the
+# confinement of this domain -- consider narrowing.
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+# The per-service rules below supplement the shared rules
+# provided by hadoop_domain_template().
+allow hadoop_datanode_t self:process signal;
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+# The namenode is the only domain that binds the hadoop_namenode port
+# (tcp 8020); the other daemons connect to it.
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+
+# Tasktracker log directories are created under the shared hadoop_log_t
+# location and transition to the private log type.
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+fs_associate(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t)
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+# The client signal-checks the zookeeper server daemon.
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+# JVM JMX support: the /tmp/hsperfdata_* directory type is shared with
+# the hadoop domains; files inside are labeled zookeeper_tmp_t.
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+# Client connections go to the zookeeper_client port (tcp 2181).
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+# JVM JMX support: /tmp/hsperfdata_* directory type shared with the
+# other hadoop domains; files inside labeled zookeeper_server_tmp_t.
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+# Quorum traffic: client port tcp 2181, election tcp 3888,
+# leader tcp 2888 (declared in corenetwork.te.in).
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..ac27264 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-04 17:15                 ` Paul Nuzzi
@ 2010-10-04 18:18                   ` Christopher J. PeBenito
  2010-10-05 19:59                     ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Christopher J. PeBenito @ 2010-10-04 18:18 UTC (permalink / raw)
  To: refpolicy

On 10/04/10 13:15, Paul Nuzzi wrote:
> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>> I updated the patch based on recommendations from the mailing list.
>>>>> All of hadoop's services are included in one module instead of
>>>>> individual ones.  Unconfined and sysadm roles are given access to
>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>> using run_init.  Let me know what you think.
>>>>
>>>> Why do some hadoop domain need to manage generic tmp?
>>>>
>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>
>>> This has to be done for Java JMX to work.  All of the files are written to
>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>> all the files for each service are labeled with hadoop_*_tmp_t.  The first service
>>> will end up owning the directory if it is not labeled tmp_t.
>>
>> The hsperfdata dir in /tmp certainly the bane of policy writers.  Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir.  I suggest you do something like
>>
>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>
>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>
>
> That looks like a better way to handle the tmp_t problem.
>
> I changed the patch with your comments.  Hopefully this will be one of the last updates.
> Tested on a CDH3 cluster as a module without any problems.

There are several little issues with style, but it'll be easier just to 
fix them when it's committed.

Other comments inline.

> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..7a1b5de 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
>   network_port(git, tcp,9418,s0, udp,9418,s0)
>   network_port(gopher, tcp,70,s0, udp,70,s0)
>   network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)

It seems like it would be sufficient to call it "hadoop".

>   network_port(hddtemp, tcp,7634,s0)
>   network_port(howl, tcp,5335,s0, udp,5353,s0)
>   network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
>   network_port(xen, tcp,8002,s0)
>   network_port(xfs, tcp,7100,s0)
>   network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
>   network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
>   network_port(zope, tcp,8021,s0)
>
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index cad05ff..b46b28b 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
>   ')
>
>   optional_policy(`
> +	hadoop_run(sysadm_t, sysadm_r)
> +')
> +optional_policy(`
>   	# allow system administrator to use the ipsec script to look
>   	# at things (e.g., ipsec auto --status)
>   	# probably should create an ipsec_admin role for this kind of thing
> @@ -392,6 +396,10 @@ optional_policy(`
>   	yam_run(sysadm_t, sysadm_r)
>   ')
>
> +optional_policy(`
> +	zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
>   ifndef(`distro_redhat',`
>   	optional_policy(`
>   		auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..15c61ed
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,54 @@
> +/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid		--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid		--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)

Missing some escaping on the periods: \.pid

> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..33108a3
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,358 @@
> +##<summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +##<summary>
> +##	The template to define a hadoop domain.
> +##</summary>
> +##<param name="domain_prefix">
> +##	<summary>
> +##	Domain prefix to be used.
> +##	</summary>
> +##</param>
> +#
> +template(`hadoop_domain_template',`
> +	gen_require(`
> +		attribute hadoop_domain;
> +		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> +		type hadoop_exec_t, hadoop_hsperfdata_t;
> +	')
> +
> +	########################################
> +	#
> +	# Shared declarations.
> +	#
> +
> +	type hadoop_$1_t, hadoop_domain;
> +	domain_type(hadoop_$1_t)
> +	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> +	type hadoop_$1_initrc_t;
> +	type hadoop_$1_initrc_exec_t;
> +	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> +	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> +	type hadoop_$1_lock_t;
> +	files_lock_file(hadoop_$1_lock_t)
> +	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> +	type hadoop_$1_log_t;
> +	logging_log_file(hadoop_$1_log_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> +	type hadoop_$1_var_lib_t;
> +	files_type(hadoop_$1_var_lib_t)
> +	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
> +
> +	type hadoop_$1_initrc_var_run_t;
> +	files_pid_file(hadoop_$1_initrc_var_run_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> +
> +	type hadoop_$1_tmp_t;
> +	files_tmp_file(hadoop_$1_tmp_t)
> +	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
> +	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 initrc policy.
> +	#
> +
> +	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> +	allow hadoop_$1_initrc_t self:capability { setuid setgid };
> +	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_initrc_t self:process setsched;
> +
> +	consoletype_exec(hadoop_$1_initrc_t)
> +	corecmd_exec_bin(hadoop_$1_initrc_t)
> +	corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> +	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> +	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> +	files_read_etc_files(hadoop_$1_initrc_t)
> +	files_read_usr_files(hadoop_$1_initrc_t)
> +	files_search_pids(hadoop_$1_initrc_t)
> +	files_search_locks(hadoop_$1_initrc_t)
> +	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> +	hadoop_exec_config_files(hadoop_$1_initrc_t)
> +
> +	init_rw_utmp(hadoop_$1_initrc_t)
> +	init_use_script_ptys(hadoop_$1_initrc_t)
> +
> +	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> +	kernel_read_sysctl(hadoop_$1_initrc_t)
> +	kernel_read_system_state(hadoop_$1_initrc_t)
> +
> +	logging_send_syslog_msg(hadoop_$1_initrc_t)
> +	logging_send_audit_msgs(hadoop_$1_initrc_t)
> +	logging_search_logs(hadoop_$1_initrc_t)
> +
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +	miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_initrc_t)
> +	')
> +
> +	term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> +	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> +	# This can be removed on anything post-el5
> +	libs_use_ld_so(hadoop_$1_initrc_t)
> +	libs_use_shared_libs(hadoop_$1_initrc_t)

Upstream handles shared libs appropriately, so this should be removed. 
There are other instances of this in the patch that can be removed too.

> +	####################################
> +	#
> +	# Shared hadoop_$1 policy.
> +	#
> +
> +	allow hadoop_$1_t hadoop_domain:process signull;
> +	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_t self:process execmem;
> +	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> +	corecmd_exec_bin(hadoop_$1_t)
> +	corecmd_exec_shell(hadoop_$1_t)
> +
> +	dev_read_rand(hadoop_$1_t)
> +	dev_read_urand(hadoop_$1_t)
> +	dev_read_sysfs(hadoop_$1_t)
> +	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +	files_read_etc_files(hadoop_$1_t)
> +	files_search_pids(hadoop_$1_t)
> +	files_search_var_lib(hadoop_$1_t)
> +
> +	hadoop_exec_config_files(hadoop_$1_t)
> +
> +	java_exec(hadoop_$1_t)
> +
> +	kernel_read_network_state(hadoop_$1_t)
> +	kernel_read_system_state(hadoop_$1_t)
> +
> +	logging_search_logs(hadoop_$1_t)
> +
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> +	miscfiles_read_localization(hadoop_$1_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_t)
> +	')
> +
> +	sysnet_read_config(hadoop_$1_t)
> +
> +	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> +	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> +	corenet_all_recvfrom_netlabel(hadoop_$1_t)
> +	corenet_tcp_bind_all_nodes(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> +	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> +	corenet_tcp_connect_generic_port(hadoop_$1_t)

This looks questionable.  The port it connects to can't be identified?

> +
> +	allow hadoop_$1_t self:udp_socket create_socket_perms;
> +	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> +	corenet_udp_bind_all_nodes(hadoop_$1_t)
> +
> +	# This can be removed on anything post-el5
> +	libs_use_ld_so(hadoop_$1_t)
> +	libs_use_shared_libs(hadoop_$1_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute hadoop in the
> +##	hadoop domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans',`
> +	gen_require(`
> +		type hadoop_t, hadoop_exec_t;
> +	')
> +
> +	files_search_usr($1)
> +	libs_search_lib($1)
> +	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute hadoop in the hadoop domain,
> +##	and allow the specified role the
> +##	hadoop domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +##<param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`hadoop_run',`
> +	gen_require(`
> +		type hadoop_t;
> +	')
> +
> +	hadoop_domtrans($1)
> +	role $2 types hadoop_t;
> +
> +	allow $1 hadoop_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`zookeeper_domtrans_client',`

The convention is to have the interface name first.  So this should be 
something like hadoop_domtrans_zookeeper_client.

> +	gen_require(`
> +		type zookeeper_t, zookeeper_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper server in the
> +##	zookeeper server domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`zookeeper_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_t, zookeeper_server_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper server in the
> +##	zookeeper domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`zookeeper_initrc_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_initrc_exec_t;
> +	')
> +
> +	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain, and allow the
> +##	specified role the zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +##<param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`zookeeper_run_client',`
> +	gen_require(`
> +		type zookeeper_t;
> +	')
> +
> +	zookeeper_domtrans_client($1)
> +	role $2 types zookeeper_t;
> +
> +	allow $1 zookeeper_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +##  Give permission to a domain to read
> +##  hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +##  Domain needing read permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_read_config_files', `
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	allow $1 hadoop_etc_t:dir search_dir_perms;
> +	allow $1 hadoop_etc_t:lnk_file { read getattr };
> +	allow $1 hadoop_etc_t:file read_file_perms;
> +')
> +
> +########################################
> +##<summary>
> +##  Give permission to a domain to
> +##  execute hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +##  Domain needing read and execute
> +##  permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_exec_config_files', `
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	hadoop_read_config_files($1)
> +	allow $1 hadoop_etc_t:file { execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..519aebb
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,380 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +type hadoop_hsperfdata_t;
> +files_tmp_file(hadoop_hsperfdata_t)
> +ubac_constrained(hadoop_hsperfdata_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +corenet_tcp_connect_generic_port(hadoop_t)

Can this port not be identified?  If possible, it should be locked down
to a specific port type rather than using the generic port.

> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(hadoop_t)
> +libs_use_shared_libs(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> +	nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> +	nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +allow hadoop_datanode_t self:process signal;
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +fs_associate(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +corenet_tcp_connect_generic_port(zookeeper_t)

Another port to lock down if possible.  Please recheck the port usage 
across the board.

> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_t)
> +libs_use_shared_libs(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> +	nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_server_t self:udp_socket create_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_udp_bind_all_nodes(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_server_t)
> +libs_use_shared_libs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..ac27264 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
>   ')
>
>   optional_policy(`
> +	hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
>   	inn_domtrans(unconfined_t)
>   ')
>
> @@ -210,6 +214,10 @@ optional_policy(`
>   	xserver_domtrans(unconfined_t)
>   ')
>
> +optional_policy(`
> +	zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
>   ########################################
>   #
>   # Unconfined Execmem Local policy


-- 
Chris PeBenito
Tresys Technology, LLC
www.tresys.com | oss.tresys.com

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-04 18:18                   ` Christopher J. PeBenito
@ 2010-10-05 19:59                     ` Paul Nuzzi
  2010-10-07 14:41                       ` Chris PeBenito
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-10-05 19:59 UTC (permalink / raw)
  To: refpolicy

On 10/04/2010 02:18 PM, Christopher J. PeBenito wrote:
> On 10/04/10 13:15, Paul Nuzzi wrote:
>> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>>> I updated the patch based on recommendations from the mailing list.
>>>>>> All of hadoop's services are included in one module instead of
>>>>>> individual ones.  Unconfined and sysadm roles are given access to
>>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>>> using run_init.  Let me know what you think.
>>>>>
>>>>> Why do some hadoop domain need to manage generic tmp?
>>>>>
>>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>>
>>>> This has to be done for Java JMX to work.  All of the files are written to
>>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>>> all the files for each service are labeled with hadoop_*_tmp_t.  The first service
>>>> will end up owning the directory if it is not labeled tmp_t.
>>>
>>> The hsperfdata dir in /tmp is certainly the bane of policy writers.  Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir.  I suggest you do something like
>>>
>>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>>
>>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>>
>>
>> That looks like a better way to handle the tmp_t problem.
>>
>> I changed the patch with your comments.  Hopefully this will be one of the last updates.
>> Tested on a CDH3 cluster as a module without any problems.
> 
> There are several little issues with style, but it'll be easier just to fix them when its committed.
> 
> Other comments inline.
> 

I did my best locking down the ports hadoop uses.  Unfortunately, the services use high, randomized ports, making
tcp_connect_generic_port a must-have.  Hopefully one day hadoop will settle on static ports.  I added the hadoop_datanode port 50010, since it is important to lock down that service.  I changed the patch based on the rest of the comments.

Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

---
 policy/modules/kernel/corenetwork.te.in |    5 
 policy/modules/roles/sysadm.te          |    8 
 policy/modules/services/hadoop.fc       |   54 ++++
 policy/modules/services/hadoop.if       |  352 +++++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  379 ++++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 
 6 files changed, 806 insertions(+)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..73163db 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_datanode, tcp, 50010,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..d2bc2b1 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..a09275d
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,54 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid	--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid	--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..e919bcb
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,352 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t, hadoop_hsperfdata_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };	# daemon and its init script both run in system_r
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)	# JVM creates the shared /tmp/hsperfdata_<user> dir
+	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)	# per-daemon files inside that dir
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	consoletype_exec(hadoop_$1_initrc_t)
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)	# init script starts the daemon in its own domain
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+	files_search_pids(hadoop_$1_initrc_t)
+	files_search_locks(hadoop_$1_initrc_t)
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	hadoop_exec_config_files(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:process execmem;	# presumably for the JVM (writable+executable memory) -- TODO confirm
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+	files_read_etc_files(hadoop_$1_t)
+	files_search_pids(hadoop_$1_t)
+	files_search_var_lib(hadoop_$1_t)
+
+	hadoop_exec_config_files(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	logging_search_logs(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	miscfiles_read_localization(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+
+	sysnet_read_config(hadoop_$1_t)
+
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	# Hadoop uses high ordered random ports for services
+	# If permanent ports are chosen, remove line below and lock down
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)	# NOTE(review): presumably hadoop lives under /usr/lib -- confirm
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)	# executing hadoop_exec_t transitions $1 to hadoop_t
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };	# caller gets full control of its hadoop child, including ptrace
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)	# executing zookeeper_exec_t transitions $1 to zookeeper_t
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)	# executing zookeeper_server_exec_t transitions $1 to zookeeper_server_t
+')
+
+########################################
+## <summary>
+##	Execute the zookeeper server init
+##	script in the init script domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	hadoop_domtrans_zookeeper_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };	# caller gets full control of its zookeeper child, including ptrace
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Read hadoop configuration files
+##	(hadoop_etc_t).
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_read_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
+	allow $1 hadoop_etc_t:file read_file_perms;
+')
+
+########################################
+## <summary>
+##	Read and execute hadoop configuration
+##	files (hadoop_etc_t).
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed read and execute
+##	access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	hadoop_read_config_files($1)
+	allow $1 hadoop_etc_t:file { execute execute_no_trans };
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..587c393
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,379 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;	# implemented by every daemon domain created via hadoop_domain_template
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+type hadoop_hsperfdata_t;	# shared JVM /tmp/hsperfdata_<user> directory, used by all hadoop/zookeeper domains
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;	# NOTE(review): key write without create/search looks incomplete -- confirm
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)	# JVM creates the shared /tmp/hsperfdata_<user> dir
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)	# NOTE(review): zope port type reused here -- confirm the intended hadoop port
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)	# FIXME: far too broad and subsumes files_read_usr_files above; restrict to the types hadoop actually needs
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)	# listens on the hadoop datanode port
+corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)	# datanode-to-datanode connections
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)	# connects to the namenode port (tcp/8020)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)	# NOTE(review): zope port type reused here -- confirm the intended jobtracker port
+corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)	# presumably the history(/.*)? subdir labeled in hadoop.fc -- confirm
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)	# listens on the namenode port (tcp/8020)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)	# connects to the primary namenode (tcp/8020)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_associate(hadoop_tasktracker_t)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t)	# stray trailing ';' removed; support macros are not statements
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)	# JVM /tmp/hsperfdata_<user> dir shared with the hadoop domains
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)	# client side of the zookeeper_client port (tcp/2181)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)	# JVM /tmp/hsperfdata_<user> dir shared with the hadoop domains
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)	# may re-execute its own entry point
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)	# tcp/2181
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)	# tcp/3888
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..f1e6c9f 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-05 19:59                     ` Paul Nuzzi
@ 2010-10-07 14:41                       ` Chris PeBenito
  2010-10-07 16:35                         ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Chris PeBenito @ 2010-10-07 14:41 UTC (permalink / raw)
  To: refpolicy

On 10/05/10 15:59, Paul Nuzzi wrote:
> On 10/04/2010 02:18 PM, Christopher J. PeBenito wrote:
>> On 10/04/10 13:15, Paul Nuzzi wrote:
>>> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>>>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>>>> I updated the patch based on recommendations from the mailing list.
>>>>>>> All of hadoop's services are included in one module instead of
>>>>>>> individual ones.  Unconfined and sysadm roles are given access to
>>>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>>>> using run_init.  Let me know what you think.
>>>>>>
>>>>>> Why do some hadoop domain need to manage generic tmp?
>>>>>>
>>>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>>>
>>>>> This has to be done for Java JMX to work.  All of the files are written to
>>>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>>>> all the files for each service are labeled with hadoop_*_tmp_t.  The first service
>>>>> will end up owning the directory if it is not labeled tmp_t.
>>>>
>>>> The hsperfdata dir in /tmp is certainly the bane of policy writers.  Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir.  I suggest you do something like
>>>>
>>>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>>>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>>>
>>>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>>>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>>>
>>>
>>> That looks like a better way to handle the tmp_t problem.
>>>
>>> I changed the patch with your comments.  Hopefully this will be one of the last updates.
>>> Tested on a CDH3 cluster as a module without any problems.
>>
>> There are several little issues with style, but it'll be easier just to fix them when it's committed.
>>
>> Other comments inline.
>>
>
> I did my best locking down the ports hadoop uses.  Unfortunately the services use high, randomized ports making
> tcp_connect_generic_port a must have.  Hopefully one day hadoop will settle on static ports.  I added hadoop_datanode port 50010 since it is important to lock down that service.  I changed the patch based on the rest of the comments.

Merged.  I've made several changes:

* a pass cleaning up the style.
* adjusted some regular expressions in the file contexts: .* is the same 
as (.*)? since * means 0 or more matches.
* renamed a few interfaces
* two rules that I dropped as they require further explanation

 > +files_read_all_files(hadoop_t)

A very big privilege.

and

 > +fs_associate(hadoop_tasktracker_t)

This is a domain, so the only files with this type should be the 
/proc/pid ones, which don't require associate permissions.


> ---
>   policy/modules/kernel/corenetwork.te.in |    5
>   policy/modules/roles/sysadm.te          |    8
>   policy/modules/services/hadoop.fc       |   54 ++++
>   policy/modules/services/hadoop.if       |  352 +++++++++++++++++++++++++++++
>   policy/modules/services/hadoop.te       |  379 ++++++++++++++++++++++++++++++++
>   policy/modules/system/unconfined.te     |    8
>   6 files changed, 806 insertions(+)
>
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..73163db 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
>   network_port(git, tcp,9418,s0, udp,9418,s0)
>   network_port(gopher, tcp,70,s0, udp,70,s0)
>   network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_datanode, tcp, 50010,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
>   network_port(hddtemp, tcp,7634,s0)
>   network_port(howl, tcp,5335,s0, udp,5353,s0)
>   network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
>   network_port(xen, tcp,8002,s0)
>   network_port(xfs, tcp,7100,s0)
>   network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
>   network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
>   network_port(zope, tcp,8021,s0)
>
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index cad05ff..d2bc2b1 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
>   ')
>
>   optional_policy(`
> +	hadoop_run(sysadm_t, sysadm_r)
> +')
> +
> +optional_policy(`
>   	# allow system administrator to use the ipsec script to look
>   	# at things (e.g., ipsec auto --status)
>   	# probably should create an ipsec_admin role for this kind of thing
> @@ -392,6 +396,10 @@ optional_policy(`
>   	yam_run(sysadm_t, sysadm_r)
>   ')
>
> +optional_policy(`
> +	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
>   ifndef(`distro_redhat',`
>   	optional_policy(`
>   		auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..a09275d
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,54 @@
> +/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid	--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid	--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..e919bcb
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,352 @@
> +##<summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +##<summary>
> +##	The template to define a hadoop domain.
> +##</summary>
> +##<param name="domain_prefix">
> +##	<summary>
> +##	Domain prefix to be used.
> +##	</summary>
> +##</param>
> +#
> +template(`hadoop_domain_template',`
> +	gen_require(`
> +		attribute hadoop_domain;
> +		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> +		type hadoop_exec_t, hadoop_hsperfdata_t;
> +	')
> +
> +	########################################
> +	#
> +	# Shared declarations.
> +	#
> +
> +	type hadoop_$1_t, hadoop_domain;
> +	domain_type(hadoop_$1_t)
> +	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> +	type hadoop_$1_initrc_t;
> +	type hadoop_$1_initrc_exec_t;
> +	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> +	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> +	type hadoop_$1_lock_t;
> +	files_lock_file(hadoop_$1_lock_t)
> +	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> +	type hadoop_$1_log_t;
> +	logging_log_file(hadoop_$1_log_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> +	type hadoop_$1_var_lib_t;
> +	files_type(hadoop_$1_var_lib_t)
> +	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
> +
> +	type hadoop_$1_initrc_var_run_t;
> +	files_pid_file(hadoop_$1_initrc_var_run_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> +
> +	type hadoop_$1_tmp_t;
> +	files_tmp_file(hadoop_$1_tmp_t)
> +	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
> +	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 initrc policy.
> +	#
> +
> +	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> +	allow hadoop_$1_initrc_t self:capability { setuid setgid };
> +	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_initrc_t self:process setsched;
> +
> +	consoletype_exec(hadoop_$1_initrc_t)
> +	corecmd_exec_bin(hadoop_$1_initrc_t)
> +	corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> +	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> +	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> +	files_read_etc_files(hadoop_$1_initrc_t)
> +	files_read_usr_files(hadoop_$1_initrc_t)
> +	files_search_pids(hadoop_$1_initrc_t)
> +	files_search_locks(hadoop_$1_initrc_t)
> +	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> +	hadoop_exec_config_files(hadoop_$1_initrc_t)
> +
> +	init_rw_utmp(hadoop_$1_initrc_t)
> +	init_use_script_ptys(hadoop_$1_initrc_t)
> +
> +	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> +	kernel_read_sysctl(hadoop_$1_initrc_t)
> +	kernel_read_system_state(hadoop_$1_initrc_t)
> +
> +	logging_send_syslog_msg(hadoop_$1_initrc_t)
> +	logging_send_audit_msgs(hadoop_$1_initrc_t)
> +	logging_search_logs(hadoop_$1_initrc_t)
> +
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +	miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_initrc_t)
> +	')
> +
> +	term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> +	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 policy.
> +	#
> +
> +	allow hadoop_$1_t hadoop_domain:process signull;
> +	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_t self:process execmem;
> +	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> +	corecmd_exec_bin(hadoop_$1_t)
> +	corecmd_exec_shell(hadoop_$1_t)
> +
> +	dev_read_rand(hadoop_$1_t)
> +	dev_read_urand(hadoop_$1_t)
> +	dev_read_sysfs(hadoop_$1_t)
> +	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +	files_read_etc_files(hadoop_$1_t)
> +	files_search_pids(hadoop_$1_t)
> +	files_search_var_lib(hadoop_$1_t)
> +
> +	hadoop_exec_config_files(hadoop_$1_t)
> +
> +	java_exec(hadoop_$1_t)
> +
> +	kernel_read_network_state(hadoop_$1_t)
> +	kernel_read_system_state(hadoop_$1_t)
> +
> +	logging_search_logs(hadoop_$1_t)
> +
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> +	miscfiles_read_localization(hadoop_$1_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_t)
> +	')
> +
> +	sysnet_read_config(hadoop_$1_t)
> +
> +	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> +	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> +	corenet_all_recvfrom_netlabel(hadoop_$1_t)
> +	corenet_tcp_bind_all_nodes(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> +	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> +	# Hadoop uses high ordered random ports for services
> +	# If permanent ports are chosen, remove line below and lock down
> +	corenet_tcp_connect_generic_port(hadoop_$1_t)
> +
> +	allow hadoop_$1_t self:udp_socket create_socket_perms;
> +	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> +	corenet_udp_bind_all_nodes(hadoop_$1_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute hadoop in the
> +##	hadoop domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans',`
> +	gen_require(`
> +		type hadoop_t, hadoop_exec_t;
> +	')
> +
> +	files_search_usr($1)
> +	libs_search_lib($1)
> +	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute hadoop in the hadoop domain,
> +##	and allow the specified role the
> +##	hadoop domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +##<param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`hadoop_run',`
> +	gen_require(`
> +		type hadoop_t;
> +	')
> +
> +	hadoop_domtrans($1)
> +	role $2 types hadoop_t;
> +
> +	allow $1 hadoop_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans_zookeeper_client',`
> +	gen_require(`
> +		type zookeeper_t, zookeeper_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper server in the
> +##	zookeeper server domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans_zookeeper_server',`
> +	gen_require(`
> +		type zookeeper_server_t, zookeeper_server_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	files_search_usr($1)
> +	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper server in the
> +##	zookeeper domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +#
> +interface(`hadoop_zookeeper_initrc_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_initrc_exec_t;
> +	')
> +
> +	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +##<summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain, and allow the
> +##	specified role the zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +##</param>
> +##<param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`hadoop_zookeeper_run_client',`
> +	gen_require(`
> +		type zookeeper_t;
> +	')
> +
> +	hadoop_domtrans_zookeeper_client($1)
> +	role $2 types zookeeper_t;
> +
> +	allow $1 zookeeper_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +##  Give permission to a domain to read
> +##  hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +##  Domain needing read permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_read_config_files', `
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	allow $1 hadoop_etc_t:dir search_dir_perms;
> +	allow $1 hadoop_etc_t:lnk_file { read getattr };
> +	allow $1 hadoop_etc_t:file read_file_perms;
> +')
> +
> +########################################
> +##<summary>
> +##  Give permission to a domain to
> +##  execute hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +##  Domain needing read and execute
> +##  permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_exec_config_files', `
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	hadoop_read_config_files($1)
> +	allow $1 hadoop_etc_t:file { execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..587c393
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,379 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +type hadoop_hsperfdata_t;
> +files_tmp_file(hadoop_hsperfdata_t)
> +ubac_constrained(hadoop_hsperfdata_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(hadoop_t)
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> +	nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> +	nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +allow hadoop_datanode_t self:process signal;
> +corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_associate(hadoop_tasktracker_t)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(zookeeper_t)
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> +	nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_server_t self:udp_socket create_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_udp_bind_all_nodes(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..f1e6c9f 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
>   ')
>
>   optional_policy(`
> +	hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
>   	inn_domtrans(unconfined_t)
>   ')
>
> @@ -210,6 +214,10 @@ optional_policy(`
>   	xserver_domtrans(unconfined_t)
>   ')
>
> +optional_policy(`
> +	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
>   ########################################
>   #
>   # Unconfined Execmem Local policy


-- 
Chris PeBenito
<pebenito@gentoo.org>
Developer,
Hardened Gentoo Linux

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-07 14:41                       ` Chris PeBenito
@ 2010-10-07 16:35                         ` Paul Nuzzi
  0 siblings, 0 replies; 37+ messages in thread
From: Paul Nuzzi @ 2010-10-07 16:35 UTC (permalink / raw)
  To: refpolicy

On 10/07/2010 10:41 AM, Chris PeBenito wrote:
> On 10/05/10 15:59, Paul Nuzzi wrote:
>> On 10/04/2010 02:18 PM, Christopher J. PeBenito wrote:
>>> On 10/04/10 13:15, Paul Nuzzi wrote:
>>>> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>>>>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>>>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>>>>> I updated the patch based on recommendations from the mailing list.
>>>>>>>> All of hadoop's services are included in one module instead of
>>>>>>>> individual ones.  Unconfined and sysadm roles are given access to
>>>>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>>>>> using run_init.  Let me know what you think.
>>>>>>>
>>>>>>> Why do some hadoop domain need to manage generic tmp?
>>>>>>>
>>>>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>>>>
>>>>>> This has to be done for Java JMX to work.  All of the files are written to
>>>>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>>>>> all the files for each service are labeled with hadoop_*_tmp_t.  The first service
>>>>>> will end up owning the directory if it is not labeled tmp_t.
>>>>>
>>>>> The hsperfdata dir in /tmp is certainly the bane of policy writers.  Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir.  I suggest you do something like
>>>>>
>>>>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>>>>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>>>>
>>>>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>>>>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>>>>
>>>>
>>>> That looks like a better way to handle the tmp_t problem.
>>>>
>>>> I changed the patch with your comments.  Hopefully this will be one of the last updates.
>>>> Tested on a CDH3 cluster as a module without any problems.
>>>
>>> There are several little issues with style, but it'll be easier just to fix them when it's committed.
>>>
>>> Other comments inline.
>>>
>>
>> I did my best locking down the ports hadoop uses.  Unfortunately the services use high, randomized ports making
>> tcp_connect_generic_port a must have.  Hopefully one day hadoop will settle on static ports.  I added hadoop_datanode port 50010 since it is important to lock down that service.  I changed the patch based on the rest of the comments.
> 
> Merged.  I've made several changes:

Thanks to everyone who helped get this merged.

> 
> * a pass cleaning up the style.
> * adjusted some regular expressions in the file contexts: .* is the same as (.*)? since * means 0 or more matches.
> * renamed a few interfaces
> * two rules that I dropped as they require further explanation
> 
>> +files_read_all_files(hadoop_t)
> 
> A very big privilege.

"hadoop fs -put" takes any file you are allowed to access and puts it into the distributed file system.

> and
> 
>> +fs_associate(hadoop_tasktracker_t)

This might not be needed.

> This is a domain, so the only files with this type should be the /proc/pid ones, which don't require associate permissions.
> 
> 
>> ---
>>   policy/modules/kernel/corenetwork.te.in |    5
>>   policy/modules/roles/sysadm.te          |    8
>>   policy/modules/services/hadoop.fc       |   54 ++++
>>   policy/modules/services/hadoop.if       |  352 +++++++++++++++++++++++++++++
>>   policy/modules/services/hadoop.te       |  379 ++++++++++++++++++++++++++++++++
>>   policy/modules/system/unconfined.te     |    8
>>   6 files changed, 806 insertions(+)
>>
>> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
>> index 2ecdde8..73163db 100644
>> --- a/policy/modules/kernel/corenetwork.te.in
>> +++ b/policy/modules/kernel/corenetwork.te.in
>> @@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
>>   network_port(git, tcp,9418,s0, udp,9418,s0)
>>   network_port(gopher, tcp,70,s0, udp,70,s0)
>>   network_port(gpsd, tcp,2947,s0)
>> +network_port(hadoop_datanode, tcp, 50010,s0)
>> +network_port(hadoop_namenode, tcp, 8020,s0)
>>   network_port(hddtemp, tcp,7634,s0)
>>   network_port(howl, tcp,5335,s0, udp,5353,s0)
>>   network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
>> @@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
>>   network_port(xen, tcp,8002,s0)
>>   network_port(xfs, tcp,7100,s0)
>>   network_port(xserver, tcp,6000-6020,s0)
>> +network_port(zookeeper_client, tcp, 2181,s0)
>> +network_port(zookeeper_election, tcp, 3888,s0)
>> +network_port(zookeeper_leader, tcp, 2888,s0)
>>   network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
>>   network_port(zope, tcp,8021,s0)
>>
>> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
>> index cad05ff..d2bc2b1 100644
>> --- a/policy/modules/roles/sysadm.te
>> +++ b/policy/modules/roles/sysadm.te
>> @@ -152,6 +152,10 @@ optional_policy(`
>>   ')
>>
>>   optional_policy(`
>> +    hadoop_run(sysadm_t, sysadm_r)
>> +')
>> +
>> +optional_policy(`
>>       # allow system administrator to use the ipsec script to look
>>       # at things (e.g., ipsec auto --status)
>>       # probably should create an ipsec_admin role for this kind of thing
>> @@ -392,6 +396,10 @@ optional_policy(`
>>       yam_run(sysadm_t, sysadm_r)
>>   ')
>>
>> +optional_policy(`
>> +    hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
>> +')
>> +
>>   ifndef(`distro_redhat',`
>>       optional_policy(`
>>           auth_role(sysadm_r, sysadm_t)
>> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
>> new file mode 100644
>> index 0000000..a09275d
>> --- /dev/null
>> +++ b/policy/modules/services/hadoop.fc
>> @@ -0,0 +1,54 @@
>> +/etc/hadoop.*(/.*)?                        gen_context(system_u:object_r:hadoop_etc_t,s0)
>> +
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode        --    gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker        --    gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode        --    gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode    --    gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker        --    gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-zookeeper            --    gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-datanode                --    gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-jobtracker                --    gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-namenode                --    gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-secondarynamenode            --    gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-tasktracker                --    gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
>> +/etc/init\.d/zookeeper                    --    gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
>> +
>> +/etc/zookeeper(/.*)?                        gen_context(system_u:object_r:zookeeper_etc_t,s0)
>> +/etc/zookeeper\.dist(/.*)?                    gen_context(system_u:object_r:zookeeper_etc_t,s0)
>> +
>> +/usr/lib/hadoop(.*)?/bin/hadoop                --    gen_context(system_u:object_r:hadoop_exec_t,s0)
>> +
>> +/usr/bin/zookeeper-client                --    gen_context(system_u:object_r:zookeeper_exec_t,s0)
>> +/usr/bin/zookeeper-server                --    gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
>> +
>> +/var/zookeeper(/.*)?                        gen_context(system_u:object_r:zookeeper_server_var_t,s0)
>> +/var/lib/zookeeper(/.*)?                    gen_context(system_u:object_r:zookeeper_server_var_t,s0)
>> +
>> +/var/lib/hadoop(.*)?                        gen_context(system_u:object_r:hadoop_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?        gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?        gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?    gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?        gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?    gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
>> +
>> +/var/lock/subsys/hadoop-datanode            --    gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
>> +/var/lock/subsys/hadoop-namenode            --    gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
>> +/var/lock/subsys/hadoop-jobtracker            --    gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
>> +/var/lock/subsys/hadoop-tasktracker            --    gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
>> +/var/lock/subsys/hadoop-secondarynamenode        --    gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
>> +
>> +/var/log/hadoop(.*)?                        gen_context(system_u:object_r:hadoop_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?        gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?        gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?        gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?    gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?        gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
>> +/var/log/hadoop(.*)?/history(/.*)?                gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
>> +/var/log/zookeeper(/.*)?                    gen_context(system_u:object_r:zookeeper_log_t,s0)
>> +
>> +/var/run/hadoop(.*)?                    -d    gen_context(system_u:object_r:hadoop_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid    --    gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid    --    gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid    --    gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid    --    gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid    --    gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
>> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
>> new file mode 100644
>> index 0000000..e919bcb
>> --- /dev/null
>> +++ b/policy/modules/services/hadoop.if
>> @@ -0,0 +1,352 @@
>> +##<summary>Software for reliable, scalable, distributed computing.</summary>
>> +
>> +#######################################
>> +##<summary>
>> +##    The template to define a hadoop domain.
>> +##</summary>
>> +##<param name="domain_prefix">
>> +##    <summary>
>> +##    Domain prefix to be used.
>> +##    </summary>
>> +##</param>
>> +#
>> +template(`hadoop_domain_template',`
>> +    gen_require(`
>> +        attribute hadoop_domain;
>> +        type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
>> +        type hadoop_exec_t, hadoop_hsperfdata_t;
>> +    ')
>> +
>> +    ########################################
>> +    #
>> +    # Shared declarations.
>> +    #
>> +
>> +    type hadoop_$1_t, hadoop_domain;
>> +    domain_type(hadoop_$1_t)
>> +    domain_entry_file(hadoop_$1_t, hadoop_exec_t)
>> +
>> +    type hadoop_$1_initrc_t;
>> +    type hadoop_$1_initrc_exec_t;
>> +    init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
>> +
>> +    role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
>> +
>> +    type hadoop_$1_lock_t;
>> +    files_lock_file(hadoop_$1_lock_t)
>> +    files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
>> +
>> +    type hadoop_$1_log_t;
>> +    logging_log_file(hadoop_$1_log_t)
>> +    filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
>> +    filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
>> +
>> +    type hadoop_$1_var_lib_t;
>> +    files_type(hadoop_$1_var_lib_t)
>> +    filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
>> +
>> +    type hadoop_$1_initrc_var_run_t;
>> +    files_pid_file(hadoop_$1_initrc_var_run_t)
>> +    filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
>> +
>> +    type hadoop_$1_tmp_t;
>> +    files_tmp_file(hadoop_$1_tmp_t)
>> +    files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
>> +    filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
>> +
>> +    ####################################
>> +    #
>> +    # Shared hadoop_$1 initrc policy.
>> +    #
>> +
>> +    allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
>> +    allow hadoop_$1_initrc_t self:capability { setuid setgid };
>> +    allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
>> +    allow hadoop_$1_initrc_t self:process setsched;
>> +
>> +    consoletype_exec(hadoop_$1_initrc_t)
>> +    corecmd_exec_bin(hadoop_$1_initrc_t)
>> +    corecmd_exec_shell(hadoop_$1_initrc_t)
>> +
>> +    domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
>> +    dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
>> +
>> +    files_read_etc_files(hadoop_$1_initrc_t)
>> +    files_read_usr_files(hadoop_$1_initrc_t)
>> +    files_search_pids(hadoop_$1_initrc_t)
>> +    files_search_locks(hadoop_$1_initrc_t)
>> +    fs_getattr_xattr_fs(hadoop_$1_initrc_t)
>> +
>> +    hadoop_exec_config_files(hadoop_$1_initrc_t)
>> +
>> +    init_rw_utmp(hadoop_$1_initrc_t)
>> +    init_use_script_ptys(hadoop_$1_initrc_t)
>> +
>> +    kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
>> +    kernel_read_sysctl(hadoop_$1_initrc_t)
>> +    kernel_read_system_state(hadoop_$1_initrc_t)
>> +
>> +    logging_send_syslog_msg(hadoop_$1_initrc_t)
>> +    logging_send_audit_msgs(hadoop_$1_initrc_t)
>> +    logging_search_logs(hadoop_$1_initrc_t)
>> +
>> +    manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
>> +    manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
>> +    manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
>> +    manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
>> +    manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
>> +
>> +    miscfiles_read_localization(hadoop_$1_initrc_t)
>> +
>> +    optional_policy(`
>> +        nscd_socket_use(hadoop_$1_initrc_t)
>> +    ')
>> +
>> +    term_use_generic_ptys(hadoop_$1_initrc_t)
>> +
>> +    userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
>> +
>> +    ####################################
>> +    #
>> +    # Shared hadoop_$1 policy.
>> +    #
>> +
>> +    allow hadoop_$1_t hadoop_domain:process signull;
>> +    allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
>> +    allow hadoop_$1_t self:process execmem;
>> +    allow hadoop_$1_t hadoop_var_run_t:dir getattr;
>> +
>> +    corecmd_exec_bin(hadoop_$1_t)
>> +    corecmd_exec_shell(hadoop_$1_t)
>> +
>> +    dev_read_rand(hadoop_$1_t)
>> +    dev_read_urand(hadoop_$1_t)
>> +    dev_read_sysfs(hadoop_$1_t)
>> +    dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
>> +
>> +    files_read_etc_files(hadoop_$1_t)
>> +    files_search_pids(hadoop_$1_t)
>> +    files_search_var_lib(hadoop_$1_t)
>> +
>> +    hadoop_exec_config_files(hadoop_$1_t)
>> +
>> +    java_exec(hadoop_$1_t)
>> +
>> +    kernel_read_network_state(hadoop_$1_t)
>> +    kernel_read_system_state(hadoop_$1_t)
>> +
>> +    logging_search_logs(hadoop_$1_t)
>> +
>> +    manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
>> +    manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> +    manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
>> +    manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
>> +    manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
>> +    miscfiles_read_localization(hadoop_$1_t)
>> +
>> +    optional_policy(`
>> +        nscd_socket_use(hadoop_$1_t)
>> +    ')
>> +
>> +    sysnet_read_config(hadoop_$1_t)
>> +
>> +    allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
>> +    corenet_all_recvfrom_unlabeled(hadoop_$1_t)
>> +    corenet_all_recvfrom_netlabel(hadoop_$1_t)
>> +    corenet_tcp_bind_all_nodes(hadoop_$1_t)
>> +    corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
>> +    corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
>> +    corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
>> +    # Hadoop uses high ordered random ports for services
>> +    # If permanent ports are chosen, remove line below and lock down
>> +    corenet_tcp_connect_generic_port(hadoop_$1_t)
>> +
>> +    allow hadoop_$1_t self:udp_socket create_socket_perms;
>> +    corenet_udp_sendrecv_generic_if(hadoop_$1_t)
>> +    corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
>> +    corenet_udp_bind_all_nodes(hadoop_$1_t)
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##    Execute hadoop in the
>> +##    hadoop domain.
>> +##</summary>
>> +##<param name="domain">
>> +##    <summary>
>> +##    Domain allowed to transition.
>> +##    </summary>
>> +##</param>
>> +#
>> +interface(`hadoop_domtrans',`
>> +    gen_require(`
>> +        type hadoop_t, hadoop_exec_t;
>> +    ')
>> +
>> +    files_search_usr($1)
>> +    libs_search_lib($1)
>> +    domtrans_pattern($1, hadoop_exec_t, hadoop_t)
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##    Execute hadoop in the hadoop domain,
>> +##    and allow the specified role the
>> +##    hadoop domain.
>> +##</summary>
>> +##<param name="domain">
>> +##    <summary>
>> +##    Domain allowed to transition.
>> +##    </summary>
>> +##</param>
>> +##<param name="role">
>> +##    <summary>
>> +##    Role allowed access.
>> +##    </summary>
>> +##</param>
>> +##<rolecap/>
>> +#
>> +interface(`hadoop_run',`
>> +    gen_require(`
>> +        type hadoop_t;
>> +    ')
>> +
>> +    hadoop_domtrans($1)
>> +    role $2 types hadoop_t;
>> +
>> +    allow $1 hadoop_t:process { ptrace signal_perms };
>> +    ps_process_pattern($1, hadoop_t)
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##    Execute zookeeper client in the
>> +##    zookeeper client domain.
>> +##</summary>
>> +##<param name="domain">
>> +##    <summary>
>> +##    Domain allowed to transition.
>> +##    </summary>
>> +##</param>
>> +#
>> +interface(`hadoop_domtrans_zookeeper_client',`
>> +    gen_require(`
>> +        type zookeeper_t, zookeeper_exec_t;
>> +    ')
>> +
>> +    corecmd_search_bin($1)
>> +    files_search_usr($1)
>> +    domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##    Execute zookeeper server in the
>> +##    zookeeper server domain.
>> +##</summary>
>> +##<param name="domain">
>> +##    <summary>
>> +##    Domain allowed to transition.
>> +##    </summary>
>> +##</param>
>> +#
>> +interface(`hadoop_domtrans_zookeeper_server',`
>> +    gen_require(`
>> +        type zookeeper_server_t, zookeeper_server_exec_t;
>> +    ')
>> +
>> +    corecmd_search_bin($1)
>> +    files_search_usr($1)
>> +    domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##    Execute zookeeper server in the
>> +##    zookeeper domain.
>> +##</summary>
>> +##<param name="domain">
>> +##    <summary>
>> +##    Domain allowed to transition.
>> +##    </summary>
>> +##</param>
>> +#
>> +interface(`hadoop_zookeeper_initrc_domtrans_server',`
>> +    gen_require(`
>> +        type zookeeper_server_initrc_exec_t;
>> +    ')
>> +
>> +    init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##    Execute zookeeper client in the
>> +##    zookeeper client domain, and allow the
>> +##    specified role the zookeeper client domain.
>> +##</summary>
>> +##<param name="domain">
>> +##    <summary>
>> +##    Domain allowed to transition.
>> +##    </summary>
>> +##</param>
>> +##<param name="role">
>> +##    <summary>
>> +##    Role allowed access.
>> +##    </summary>
>> +##</param>
>> +##<rolecap/>
>> +#
>> +interface(`hadoop_zookeeper_run_client',`
>> +    gen_require(`
>> +        type zookeeper_t;
>> +    ')
>> +
>> +    hadoop_domtrans_zookeeper_client($1)
>> +    role $2 types zookeeper_t;
>> +
>> +    allow $1 zookeeper_t:process { ptrace signal_perms };
>> +    ps_process_pattern($1, zookeeper_t)
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##  Give permission to a domain to read
>> +##  hadoop_etc_t
>> +##</summary>
>> +##<param name="domain">
>> +##<summary>
>> +##  Domain needing read permission
>> +##</summary>
>> +##</param>
>> +#
>> +interface(`hadoop_read_config_files', `
>> +    gen_require(`
>> +        type hadoop_etc_t;
>> +    ')
>> +
>> +    allow $1 hadoop_etc_t:dir search_dir_perms;
>> +    allow $1 hadoop_etc_t:lnk_file { read getattr };
>> +    allow $1 hadoop_etc_t:file read_file_perms;
>> +')
>> +
>> +########################################
>> +##<summary>
>> +##  Give permission to a domain to
>> +##  execute hadoop_etc_t
>> +##</summary>
>> +##<param name="domain">
>> +##<summary>
>> +##  Domain needing read and execute
>> +##  permission
>> +##</summary>
>> +##</param>
>> +#
>> +interface(`hadoop_exec_config_files', `
>> +    gen_require(`
>> +        type hadoop_etc_t;
>> +    ')
>> +
>> +    hadoop_read_config_files($1)
>> +    allow $1 hadoop_etc_t:file { execute execute_no_trans};
>> +')
>> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
>> new file mode 100644
>> index 0000000..587c393
>> --- /dev/null
>> +++ b/policy/modules/services/hadoop.te
>> @@ -0,0 +1,379 @@
>> +policy_module(hadoop, 1.0.0)
>> +
>> +########################################
>> +#
>> +# Hadoop declarations.
>> +#
>> +
>> +attribute hadoop_domain;
>> +
>> +type hadoop_t;
>> +type hadoop_exec_t;
>> +application_domain(hadoop_t, hadoop_exec_t)
>> +ubac_constrained(hadoop_t)
>> +
>> +type hadoop_etc_t;
>> +files_config_file(hadoop_etc_t)
>> +
>> +type hadoop_var_lib_t;
>> +files_type(hadoop_var_lib_t)
>> +
>> +type hadoop_log_t;
>> +logging_log_file(hadoop_log_t)
>> +
>> +type hadoop_var_run_t;
>> +files_pid_file(hadoop_var_run_t)
>> +
>> +type hadoop_tmp_t;
>> +files_tmp_file(hadoop_tmp_t)
>> +ubac_constrained(hadoop_tmp_t)
>> +
>> +type hadoop_hsperfdata_t;
>> +files_tmp_file(hadoop_hsperfdata_t)
>> +ubac_constrained(hadoop_hsperfdata_t)
>> +
>> +hadoop_domain_template(datanode)
>> +hadoop_domain_template(jobtracker)
>> +hadoop_domain_template(namenode)
>> +hadoop_domain_template(secondarynamenode)
>> +hadoop_domain_template(tasktracker)
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper client declarations.
>> +#
>> +
>> +type zookeeper_t;
>> +type zookeeper_exec_t;
>> +application_domain(zookeeper_t, zookeeper_exec_t)
>> +ubac_constrained(zookeeper_t)
>> +
>> +type zookeeper_etc_t;
>> +files_config_file(zookeeper_etc_t)
>> +
>> +type zookeeper_log_t;
>> +logging_log_file(zookeeper_log_t)
>> +
>> +type zookeeper_tmp_t;
>> +files_tmp_file(zookeeper_tmp_t)
>> +ubac_constrained(zookeeper_tmp_t)
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper server declarations.
>> +#
>> +
>> +type zookeeper_server_t;
>> +type zookeeper_server_exec_t;
>> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
>> +
>> +type zookeeper_server_initrc_exec_t;
>> +init_script_file(zookeeper_server_initrc_exec_t)
>> +
>> +type zookeeper_server_var_t;
>> +files_type(zookeeper_server_var_t)
>> +
>> +# This will need a file context specification.
>> +type zookeeper_server_var_run_t;
>> +files_pid_file(zookeeper_server_var_run_t)
>> +
>> +type zookeeper_server_tmp_t;
>> +files_tmp_file(zookeeper_server_tmp_t)
>> +
>> +########################################
>> +#
>> +# Hadoop policy.
>> +#
>> +
>> +allow hadoop_t self:capability sys_resource;
>> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
>> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
>> +allow hadoop_t self:key write;
>> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
>> +allow hadoop_t self:udp_socket create_socket_perms;
>> +allow hadoop_t hadoop_domain:process signull;
>> +
>> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
>> +
>> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
>> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
>> +can_exec(hadoop_t, hadoop_etc_t)
>> +
>> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
>> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
>> +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
>> +
>> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
>> +
>> +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>> +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>> +
>> +kernel_read_network_state(hadoop_t)
>> +kernel_read_system_state(hadoop_t)
>> +
>> +corecmd_exec_bin(hadoop_t)
>> +corecmd_exec_shell(hadoop_t)
>> +
>> +corenet_all_recvfrom_unlabeled(hadoop_t)
>> +corenet_all_recvfrom_netlabel(hadoop_t)
>> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
>> +corenet_sendrecv_portmap_client_packets(hadoop_t)
>> +corenet_sendrecv_zope_client_packets(hadoop_t)
>> +corenet_tcp_bind_all_nodes(hadoop_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
>> +corenet_tcp_connect_portmap_port(hadoop_t)
>> +corenet_tcp_connect_zope_port(hadoop_t)
>> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
>> +corenet_tcp_sendrecv_all_ports(hadoop_t)
>> +corenet_tcp_sendrecv_generic_if(hadoop_t)
>> +# Hadoop uses high ordered random ports for services
>> +# If permanent ports are chosen, remove line below and lock down
>> +corenet_tcp_connect_generic_port(hadoop_t)
>> +corenet_udp_bind_all_nodes(hadoop_t)
>> +corenet_udp_sendrecv_all_nodes(hadoop_t)
>> +corenet_udp_sendrecv_all_ports(hadoop_t)
>> +corenet_udp_sendrecv_generic_if(hadoop_t)
>> +
>> +dev_read_rand(hadoop_t)
>> +dev_read_sysfs(hadoop_t)
>> +dev_read_urand(hadoop_t)
>> +
>> +files_dontaudit_search_spool(hadoop_t)
>> +files_read_usr_files(hadoop_t)
>> +files_read_all_files(hadoop_t)
>> +
>> +fs_getattr_xattr_fs(hadoop_t)
>> +
>> +java_exec(hadoop_t)
>> +
>> +miscfiles_read_localization(hadoop_t)
>> +
>> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
>> +userdom_use_user_terminals(hadoop_t)
>> +
>> +optional_policy(`
>> +    nis_use_ypbind(hadoop_t)
>> +')
>> +
>> +optional_policy(`
>> +    nscd_socket_use(hadoop_t)
>> +')
>> +
>> +########################################
>> +#
>> +# Hadoop datanode policy.
>> +#
>> +
>> +allow hadoop_datanode_t self:process signal;
>> +corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
>> +fs_getattr_xattr_fs(hadoop_datanode_t)
>> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +
>> +########################################
>> +#
>> +# Hadoop jobtracker policy.
>> +#
>> +
>> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
>> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
>> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
>> +
>> +########################################
>> +#
>> +# Hadoop namenode policy.
>> +#
>> +
>> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
>> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +
>> +########################################
>> +#
>> +# Hadoop secondary namenode policy.
>> +#
>> +
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
>> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +
>> +########################################
>> +#
>> +# Hadoop tasktracker policy.
>> +#
>> +
>> +allow hadoop_tasktracker_t self:process signal;
>> +
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
>> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
>> +
>> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
>> +fs_associate(hadoop_tasktracker_t)
>> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
>> +
>> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper client policy.
>> +#
>> +
>> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
>> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
>> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
>> +allow zookeeper_t self:udp_socket create_socket_perms;
>> +allow zookeeper_t zookeeper_server_t:process signull;
>> +
>> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
>> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
>> +
>> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
>> +
>> +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
>> +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>> +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>> +
>> +can_exec(zookeeper_t, zookeeper_exec_t)
>> +
>> +kernel_read_network_state(zookeeper_t)
>> +kernel_read_system_state(zookeeper_t)
>> +
>> +corecmd_exec_bin(zookeeper_t)
>> +corecmd_exec_shell(zookeeper_t)
>> +
>> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
>> +
>> +corenet_all_recvfrom_unlabeled(zookeeper_t)
>> +corenet_all_recvfrom_netlabel(zookeeper_t)
>> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
>> +corenet_tcp_bind_all_nodes(zookeeper_t)
>> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
>> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
>> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
>> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
>> +# Hadoop uses high ordered random ports for services
>> +# If permanent ports are chosen, remove line below and lock down
>> +corenet_tcp_connect_generic_port(zookeeper_t)
>> +corenet_udp_bind_all_nodes(zookeeper_t)
>> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
>> +corenet_udp_sendrecv_all_ports(zookeeper_t)
>> +corenet_udp_sendrecv_generic_if(zookeeper_t)
>> +
>> +dev_read_rand(zookeeper_t)
>> +dev_read_sysfs(zookeeper_t)
>> +dev_read_urand(zookeeper_t)
>> +
>> +files_read_etc_files(zookeeper_t)
>> +files_read_usr_files(zookeeper_t)
>> +
>> +miscfiles_read_localization(zookeeper_t)
>> +
>> +sysnet_read_config(zookeeper_t)
>> +
>> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
>> +userdom_use_user_terminals(zookeeper_t)
>> +
>> +java_exec(zookeeper_t)
>> +
>> +optional_policy(`
>> +    nscd_socket_use(zookeeper_t)
>> +')
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper server policy.
>> +#
>> +
>> +allow zookeeper_server_t self:capability kill;
>> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
>> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
>> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
>> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
>> +allow zookeeper_server_t self:udp_socket create_socket_perms;
>> +
>> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
>> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
>> +
>> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
>> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
>> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
>> +
>> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
>> +
>> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
>> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
>> +
>> +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
>> +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
>> +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
>> +
>> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
>> +
>> +kernel_read_network_state(zookeeper_server_t)
>> +kernel_read_system_state(zookeeper_server_t)
>> +
>> +corecmd_exec_bin(zookeeper_server_t)
>> +corecmd_exec_shell(zookeeper_server_t)
>> +
>> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
>> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
>> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
>> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
>> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
>> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
>> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
>> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
>> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
>> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
>> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
>> +# Hadoop uses high ordered random ports for services
>> +# If permanent ports are chosen, remove line below and lock down
>> +corenet_tcp_connect_generic_port(zookeeper_server_t)
>> +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
>> +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
>> +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
>> +corenet_udp_bind_all_nodes(zookeeper_server_t)
>> +
>> +dev_read_rand(zookeeper_server_t)
>> +dev_read_sysfs(zookeeper_server_t)
>> +dev_read_urand(zookeeper_server_t)
>> +
>> +files_read_etc_files(zookeeper_server_t)
>> +files_read_usr_files(zookeeper_server_t)
>> +
>> +fs_getattr_xattr_fs(zookeeper_server_t)
>> +
>> +logging_send_syslog_msg(zookeeper_server_t)
>> +
>> +miscfiles_read_localization(zookeeper_server_t)
>> +
>> +sysnet_read_config(zookeeper_server_t)
>> +
>> +java_exec(zookeeper_server_t)
>> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
>> index f976344..f1e6c9f 100644
>> --- a/policy/modules/system/unconfined.te
>> +++ b/policy/modules/system/unconfined.te
>> @@ -118,6 +118,10 @@ optional_policy(`
>>   ')
>>
>>   optional_policy(`
>> +    hadoop_run(unconfined_t, unconfined_r)
>> +')
>> +
>> +optional_policy(`
>>       inn_domtrans(unconfined_t)
>>   ')
>>
>> @@ -210,6 +214,10 @@ optional_policy(`
>>       xserver_domtrans(unconfined_t)
>>   ')
>>
>> +optional_policy(`
>> +    hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
>> +')
>> +
>>   ########################################
>>   #
>>   # Unconfined Execmem Local policy
> 
> 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-06 15:54 ` Paul Nuzzi
@ 2010-10-06 17:34   ` Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-10-06 17:34 UTC (permalink / raw)
  To: refpolicy

On Wed, Oct 06, 2010 at 11:54:40AM -0400, Paul Nuzzi wrote:
> On 10/06/2010 06:25 AM, Dominick Grift wrote:
> > Some more suggested changes. One of them is to not allow the hadoop rc script domains and the hadoop domain to write log files (only append to them).
> > I wonder if this revision still works for you.
> > I am also still wondering about the file context specification. If i am correct, you state that some of them do not work for one reason or another. We should really try to make them all work else it does not make sense to specify them in the first place.
> 
> I've had an issue with /var/zookeeper(/.*) and /var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?.  It seems the kernel file_contexts hits on /var(/.*) => var_t before getting to /var/zookeeper(/.*) => zookeeper_server_var_t.  

I added some comments inline. Feel free to undo or change anything I added that broke the policy.

That is strange; /var/zookeeper(/.*)? should override /var(/.*)? for /var/zookeeper.
> 
> I briefly tested your policy on the NameNode/JobTracker/SecondaryNameNode.  It would not work in enforcing mode because you are not allowing the services to write log files. 

I guess that may be a bug in the various hadoop domains, and/or something redirects output to the log files. Log files should be opened for append only.

But I guess we will have to allow it. I wonder why this does not work for hadoop but seems to work for zookeeper.

> 
> Other comments inline.
> 
> > Signed-off-by: Dominick Grift <domg472@gmail.com>
> > ---
> > :100644 100644 2ecdde8... 73163db... M	policy/modules/kernel/corenetwork.te.in
> > :100644 100644 cad05ff... d2bc2b1... M	policy/modules/roles/sysadm.te
> > :000000 100644 0000000... 5935162... A	policy/modules/services/hadoop.fc
> > :000000 100644 0000000... cee7cd5... A	policy/modules/services/hadoop.if
> > :000000 100644 0000000... 515d2da... A	policy/modules/services/hadoop.te
> > :100644 100644 f976344... f1e6c9f... M	policy/modules/system/unconfined.te
> >  policy/modules/kernel/corenetwork.te.in |    5 +
> >  policy/modules/roles/sysadm.te          |    8 +
> >  policy/modules/services/hadoop.fc       |   55 ++++
> >  policy/modules/services/hadoop.if       |  364 +++++++++++++++++++++++++++
> >  policy/modules/services/hadoop.te       |  410 +++++++++++++++++++++++++++++++
> >  policy/modules/system/unconfined.te     |    8 +
> >  6 files changed, 850 insertions(+), 0 deletions(-)
> > 
> > diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> > index 2ecdde8..73163db 100644
> > --- a/policy/modules/kernel/corenetwork.te.in
> > +++ b/policy/modules/kernel/corenetwork.te.in
> > @@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
> >  network_port(git, tcp,9418,s0, udp,9418,s0)
> >  network_port(gopher, tcp,70,s0, udp,70,s0)
> >  network_port(gpsd, tcp,2947,s0)
> > +network_port(hadoop_datanode, tcp, 50010,s0)
> > +network_port(hadoop_namenode, tcp, 8020,s0)
> >  network_port(hddtemp, tcp,7634,s0)
> >  network_port(howl, tcp,5335,s0, udp,5353,s0)
> >  network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> > @@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
> >  network_port(xen, tcp,8002,s0)
> >  network_port(xfs, tcp,7100,s0)
> >  network_port(xserver, tcp,6000-6020,s0)
> > +network_port(zookeeper_client, tcp, 2181,s0)
> > +network_port(zookeeper_election, tcp, 3888,s0)
> > +network_port(zookeeper_leader, tcp, 2888,s0)
> >  network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
> >  network_port(zope, tcp,8021,s0)
> >  
> > diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> > index cad05ff..d2bc2b1 100644
> > --- a/policy/modules/roles/sysadm.te
> > +++ b/policy/modules/roles/sysadm.te
> > @@ -152,6 +152,10 @@ optional_policy(`
> >  ')
> >  
> >  optional_policy(`
> > +	hadoop_run(sysadm_t, sysadm_r)
> > +')
> > +
> > +optional_policy(`
> >  	# allow system administrator to use the ipsec script to look
> >  	# at things (e.g., ipsec auto --status)
> >  	# probably should create an ipsec_admin role for this kind of thing
> > @@ -392,6 +396,10 @@ optional_policy(`
> >  	yam_run(sysadm_t, sysadm_r)
> >  ')
> >  
> > +optional_policy(`
> > +	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
> > +')
> > +
> >  ifndef(`distro_redhat',`
> >  	optional_policy(`
> >  		auth_role(sysadm_r, sysadm_t)
> > diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> > new file mode 100644
> > index 0000000..5935162
> > --- /dev/null
> > +++ b/policy/modules/services/hadoop.fc
> > @@ -0,0 +1,55 @@
> > +/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
> > +
> > +# Why do these regular expresions differ from the ones below (/etc/rc.d/init.d)? Which of the two works best?
> 
> These work for Debian
> 
> > +/etc/init\.d/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> > +/etc/init\.d/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> > +/etc/init\.d/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> > +/etc/init\.d/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> > +/etc/init\.d/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> > +/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> > +
> 
> These work for Fedora/RHEL.  I haven't fully tested on Debian. Do we want to keep the Debian contexts?  

I am not referring to the location/path; I am referring to the regex, such as -(.*)?, that you use below but not above.
> 
> > +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> > +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> > +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> > +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> > +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> > +/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> > +
> > +/etc/zookeeper.*(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
> > +
> > +/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
> > +
> > +/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
> > +/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> > +
> > +/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> > +/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> > +
> > +/var/lib/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> > +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> > +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> > +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> > +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> > +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> > +
> > +/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> > +/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> > +/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> > +/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> > +/var/lock/subsys/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> > +
> > +/var/log/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_log_t,s0)
> > +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> > +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> > +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> > +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> > +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> > +/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> > +/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
> > +
> > +/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
> > +/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid			--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> > +/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid			--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> > +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> > +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> > +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> > diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> > new file mode 100644
> > index 0000000..cee7cd5
> > --- /dev/null
> > +++ b/policy/modules/services/hadoop.if
> > @@ -0,0 +1,364 @@
> > +## <summary>Software for reliable, scalable, distributed computing.</summary>
> > +
> > +#######################################
> > +## <summary>
> > +##	The template to define a hadoop domain.
> > +## </summary>
> > +## <param name="domain_prefix">
> > +##	<summary>
> > +##	Domain prefix to be used.
> > +##	</summary>
> > +## </param>
> > +#
> > +template(`hadoop_domain_template',`
> > +	gen_require(`
> > +		attribute hadoop_domain;
> > +		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> > +		type hadoop_exec_t, hadoop_hsperfdata_t, hadoop_etc_t;
> > +	')
> > +
> > +	########################################
> > +	#
> > +	# Shared declarations.
> > +	#
> > +
> > +	type hadoop_$1_t, hadoop_domain;
> > +	domain_type(hadoop_$1_t)
> > +	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> > +
> > +	type hadoop_$1_initrc_t;
> > +	type hadoop_$1_initrc_exec_t;
> > +	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> > +
> > +	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> > +
> > +	type hadoop_$1_lock_t;
> > +	files_lock_file(hadoop_$1_lock_t)
> > +
> > +	type hadoop_$1_log_t;
> > +	logging_log_file(hadoop_$1_log_t)
> > +
> > +	type hadoop_$1_var_lib_t;
> > +	files_type(hadoop_$1_var_lib_t)
> > +
> > +	type hadoop_$1_initrc_var_run_t;
> > +	files_pid_file(hadoop_$1_initrc_var_run_t)
> > +
> > +	type hadoop_$1_tmp_t;
> > +	files_tmp_file(hadoop_$1_tmp_t)
> > +
> > +	####################################
> > +	#
> > +	# Shared hadoop_$1 initrc policy.
> > +	#
> > +
> > +	allow hadoop_$1_initrc_t self:capability { setuid setgid };
> > +	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> > +	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> > +	allow hadoop_$1_initrc_t self:process setsched;
> > +
> > +	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> > +
> > +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> > +	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> > +
> > +	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
> > +	logging_search_logs(hadoop_$1_initrc_t)
> > +
> > +	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> > +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> > +
> > +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> > +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> > +	files_search_pids(hadoop_$1_initrc_t)
> > +
> > +	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> > +
> > +	can_exec(hadoop_$1_initrc_t, hadoop_etc_t)
> > +
> > +	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> > +	kernel_read_sysctl(hadoop_$1_initrc_t)
> > +	kernel_read_system_state(hadoop_$1_initrc_t)
> > +
> > +	corecmd_exec_bin(hadoop_$1_initrc_t)
> > +	corecmd_exec_shell(hadoop_$1_initrc_t)
> > +
> > +	files_dontaudit_list_default(hadoop_$1_initrc_t)
> > +	files_read_etc_files(hadoop_$1_initrc_t)
> > +	files_read_usr_files(hadoop_$1_initrc_t)
> > +
> > +	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> > +
> > +	init_rw_utmp(hadoop_$1_initrc_t)
> > +	init_use_script_ptys(hadoop_$1_initrc_t)
> > +
> > +	logging_send_audit_msgs(hadoop_$1_initrc_t)
> > +	logging_send_syslog_msg(hadoop_$1_initrc_t)
> > +
> > +	miscfiles_read_localization(hadoop_$1_initrc_t)
> > +
> > +	term_use_generic_ptys(hadoop_$1_initrc_t)
> > +
> > +	consoletype_exec(hadoop_$1_initrc_t)
> > +
> > +	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> > +
> > +	optional_policy(`
> > +		nscd_socket_use(hadoop_$1_initrc_t)
> > +	')
> > +
> > +	####################################
> > +	#
> > +	# Shared hadoop_$1 policy.
> > +	#
> > +
> > +	allow hadoop_$1_t self:process execmem;
> > +	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> > +	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> > +	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> > +	allow hadoop_$1_t self:udp_socket create_socket_perms;
> > +
> > +	allow hadoop_$1_t hadoop_domain:process signull;
> > +
> > +	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> > +	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> > +	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, { file dir })
> > +	files_search_var_lib(hadoop_$1_t)
> > +
> > +	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> > +	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
> > +
> > +	append_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	create_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	read_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	setattr_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> > +	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
> > +	logging_search_logs(hadoop_$1_t)
> > +
> > +	allow hadoop_$1_t hadoop_var_run_t:dir getattr_dir_perms;
> > +	files_search_pids(hadoop_$1_t)
> > +
> > +	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> > +	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
> > +
> > +	can_exec(hadoop_$1_t, hadoop_etc_t)
> > +
> > +	kernel_read_network_state(hadoop_$1_t)
> > +	kernel_read_system_state(hadoop_$1_t)
> > +
> > +	corecmd_exec_bin(hadoop_$1_t)
> > +	corecmd_exec_shell(hadoop_$1_t)
> > +
> > +	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> > +	corenet_all_recvfrom_netlabel(hadoop_$1_t)
> > +	corenet_tcp_bind_all_nodes(hadoop_$1_t)
> > +	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> > +	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> > +	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> > +	# Hadoop uses high ordered random ports for services
> > +	# If permanent ports are chosen, remove line below and lock down
> > +	corenet_tcp_connect_generic_port(hadoop_$1_t)
> > +	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> > +	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> > +	corenet_udp_bind_all_nodes(hadoop_$1_t)
> > +
> > +	dev_read_rand(hadoop_$1_t)
> > +	dev_read_urand(hadoop_$1_t)
> > +	dev_read_sysfs(hadoop_$1_t)
> > +
> > +	files_read_etc_files(hadoop_$1_t)
> > +
> > +	miscfiles_read_localization(hadoop_$1_t)
> > +
> > +	sysnet_read_config(hadoop_$1_t)
> > +
> > +	java_exec(hadoop_$1_t)
> > +
> > +	optional_policy(`
> > +		nscd_socket_use(hadoop_$1_t)
> > +	')
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Execute hadoop in the
> > +##	hadoop domain.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed to transition.
> > +##	</summary>
> > +## </param>
> > +#
> > +interface(`hadoop_domtrans',`
> > +	gen_require(`
> > +		type hadoop_t, hadoop_exec_t;
> > +	')
> > +
> > +	libs_search_lib($1)
> > +	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Execute hadoop in the hadoop domain,
> > +##	and allow the specified role the
> > +##	hadoop domain.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed to transition.
> > +##	</summary>
> > +## </param>
> > +## <param name="role">
> > +##	<summary>
> > +##	Role allowed access.
> > +##	</summary>
> > +## </param>
> > +## <rolecap/>
> > +#
> > +interface(`hadoop_run',`
> > +	gen_require(`
> > +		type hadoop_t;
> > +	')
> > +
> > +	hadoop_domtrans($1)
> > +	role $2 types hadoop_t;
> > +
> > +	allow $1 hadoop_t:process { ptrace signal_perms };
> > +	ps_process_pattern($1, hadoop_t)
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Execute zookeeper client in the
> > +##	zookeeper client domain.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed to transition.
> > +##	</summary>
> > +## </param>
> > +#
> > +interface(`hadoop_domtrans_zookeeper_client',`
> > +	gen_require(`
> > +		type zookeeper_t, zookeeper_exec_t;
> > +	')
> > +
> > +	corecmd_search_bin($1)
> > +	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Execute zookeeper server in the
> > +##	zookeeper server domain.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed to transition.
> > +##	</summary>
> > +## </param>
> > +#
> > +interface(`hadoop_domtrans_zookeeper_server',`
> > +	gen_require(`
> > +		type zookeeper_server_t, zookeeper_server_exec_t;
> > +	')
> > +
> > +	corecmd_search_bin($1)
> > +	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Execute zookeeper server in the
> > +##	zookeeper domain.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed to transition.
> > +##	</summary>
> > +## </param>
> > +#
> > +interface(`hadoop_zookeeper_initrc_domtrans_server',`
> > +	gen_require(`
> > +		type zookeeper_server_initrc_exec_t;
> > +	')
> > +
> > +	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Execute zookeeper client in the
> > +##	zookeeper client domain, and allow the
> > +##	specified role the zookeeper client domain.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed to transition.
> > +##	</summary>
> > +## </param>
> > +## <param name="role">
> > +##	<summary>
> > +##	Role allowed access.
> > +##	</summary>
> > +## </param>
> > +## <rolecap/>
> > +#
> > +interface(`hadoop_zookeeper_run_client',`
> > +	gen_require(`
> > +		type zookeeper_t;
> > +	')
> > +
> > +	hadoop_domtrans_zookeeper_client($1)
> > +	role $2 types zookeeper_t;
> > +
> > +	allow $1 zookeeper_t:process { ptrace signal_perms };
> > +	ps_process_pattern($1, zookeeper_t)
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Read hadoop configuration files.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed access.
> > +##	</summary>
> > +## </param>
> > +#
> > +interface(`hadoop_read_config_files',`
> > +	gen_require(`
> > +		type hadoop_etc_t;
> > +	')
> > +
> > +	files_search_etc($1)
> > +	read_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
> > +	read_lnk_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
> > +')
> > +
> > +########################################
> > +## <summary>
> > +##	Execute hadoop configuration files.
> > +## </summary>
> > +## <param name="domain">
> > +##	<summary>
> > +##	Domain allowed access.
> > +##	</summary>
> > +## </param>
> > +#
> > +interface(`hadoop_exec_config_files',`
> > +	gen_require(`
> > +		type hadoop_etc_t;
> > +	')
> > +
> > +	files_search_etc($1)
> > +	allow $1 hadoop_etc_t:dir search_dir_perms;
> > +	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
> > +	can_exec($1, hadoop_etc_t)
> > +')
> > diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> > new file mode 100644
> > index 0000000..515d2da
> > --- /dev/null
> > +++ b/policy/modules/services/hadoop.te
> > @@ -0,0 +1,410 @@
> > +policy_module(hadoop, 1.0.0)
> > +
> > +########################################
> > +#
> > +# Hadoop declarations.
> > +#
> > +
> > +attribute hadoop_domain;
> > +
> > +type hadoop_t;
> > +type hadoop_exec_t;
> > +application_domain(hadoop_t, hadoop_exec_t)
> > +ubac_constrained(hadoop_t)
> > +
> > +type hadoop_etc_t;
> > +files_config_file(hadoop_etc_t)
> > +
> > +type hadoop_var_lib_t;
> > +files_type(hadoop_var_lib_t)
> > +
> > +type hadoop_log_t;
> > +logging_log_file(hadoop_log_t)
> > +
> > +type hadoop_var_run_t;
> > +files_pid_file(hadoop_var_run_t)
> > +
> > +type hadoop_tmp_t;
> > +files_tmp_file(hadoop_tmp_t)
> > +ubac_constrained(hadoop_tmp_t)
> > +
> > +type hadoop_hsperfdata_t;
> > +files_tmp_file(hadoop_hsperfdata_t)
> > +ubac_constrained(hadoop_hsperfdata_t)
> > +
> > +hadoop_domain_template(datanode)
> > +hadoop_domain_template(jobtracker)
> > +hadoop_domain_template(namenode)
> > +hadoop_domain_template(secondarynamenode)
> > +hadoop_domain_template(tasktracker)
> > +
> > +########################################
> > +#
> > +# Hadoop zookeeper client declarations.
> > +#
> > +
> > +type zookeeper_t;
> > +type zookeeper_exec_t;
> > +application_domain(zookeeper_t, zookeeper_exec_t)
> > +ubac_constrained(zookeeper_t)
> > +
> > +type zookeeper_etc_t;
> > +files_config_file(zookeeper_etc_t)
> > +
> > +type zookeeper_log_t;
> > +logging_log_file(zookeeper_log_t)
> > +
> > +type zookeeper_tmp_t;
> > +files_tmp_file(zookeeper_tmp_t)
> > +ubac_constrained(zookeeper_tmp_t)
> > +
> > +########################################
> > +#
> > +# Hadoop zookeeper server declarations.
> > +#
> > +
> > +type zookeeper_server_t;
> > +type zookeeper_server_exec_t;
> > +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> > +
> > +type zookeeper_server_initrc_exec_t;
> > +init_script_file(zookeeper_server_initrc_exec_t)
> > +
> > +type zookeeper_server_var_t;
> > +files_type(zookeeper_server_var_t)
> > +
> > +type zookeeper_server_var_run_t;
> > +files_pid_file(zookeeper_server_var_run_t)
> > +
> > +type zookeeper_server_tmp_t;
> > +files_tmp_file(zookeeper_server_tmp_t)
> > +
> > +########################################
> > +#
> > +# Hadoop policy.
> > +#
> > +
> > +allow hadoop_t self:capability sys_resource;
> > +allow hadoop_t self:process { signal_perms setrlimit execmem };
> > +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> > +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> > +allow hadoop_t self:key write;
> > +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> > +allow hadoop_t self:udp_socket create_socket_perms;
> > +
> > +allow hadoop_t hadoop_domain:process signull;
> > +
> > +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> > +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> > +can_exec(hadoop_t, hadoop_etc_t)
> > +
> > +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +# not sure:
> > +files_search_var_lib(hadoop_t)
> > +
> > +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> > +# not sure:
> > +logging_search_logs(hadoop_t)
> > +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> > +# not sure:
> > +files_search_pids(hadoop_t)
> 
> I don't think you need files_search_var_lib, logging_search_logs or files_search_pids.

I wonder where that access is allowed, but if you're sure then this can be removed.
> 
> > +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> > +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> > +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> > +
> > +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> > +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> > +
> > +kernel_read_network_state(hadoop_t)
> > +kernel_read_system_state(hadoop_t)
> > +
> > +corecmd_exec_bin(hadoop_t)
> > +corecmd_exec_shell(hadoop_t)
> > +
> > +corenet_all_recvfrom_unlabeled(hadoop_t)
> > +corenet_all_recvfrom_netlabel(hadoop_t)
> > +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> > +corenet_sendrecv_portmap_client_packets(hadoop_t)
> > +corenet_sendrecv_zope_client_packets(hadoop_t)
> > +corenet_tcp_bind_all_nodes(hadoop_t)
> > +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> > +corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
> > +corenet_tcp_connect_portmap_port(hadoop_t)
> > +corenet_tcp_connect_zope_port(hadoop_t)
> > +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> > +corenet_tcp_sendrecv_all_ports(hadoop_t)
> > +corenet_tcp_sendrecv_generic_if(hadoop_t)
> > +# Hadoop uses high ordered random ports for services
> > +# If permanent ports are chosen, remove line below and lock down
> > +corenet_tcp_connect_generic_port(hadoop_t)
> > +corenet_udp_bind_all_nodes(hadoop_t)
> > +corenet_udp_sendrecv_all_nodes(hadoop_t)
> > +corenet_udp_sendrecv_all_ports(hadoop_t)
> > +corenet_udp_sendrecv_generic_if(hadoop_t)
> > +
> > +dev_read_rand(hadoop_t)
> > +dev_read_sysfs(hadoop_t)
> > +dev_read_urand(hadoop_t)
> > +
> > +files_dontaudit_list_default(hadoop_t)
> > +files_dontaudit_search_spool(hadoop_t)
> > +files_read_usr_files(hadoop_t)
> > +# Seems a bit coarse
> > +files_read_all_files(hadoop_t)
> 
> Needed for "hadoop fs -put" which adds any file to the distributed file system.
> Is there a finer grain alternative?

If any file, then I guess there is no better alternative.

> 
> > +fs_getattr_xattr_fs(hadoop_t)
> > +
> > +java_exec(hadoop_t)
> > +
> > +miscfiles_read_localization(hadoop_t)
> > +
> > +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> > +userdom_use_user_terminals(hadoop_t)
> > +
> > +optional_policy(`
> > +	nis_use_ypbind(hadoop_t)
> > +')
> > +
> > +optional_policy(`
> > +	nscd_socket_use(hadoop_t)
> > +')
> > +
> > +########################################
> > +#
> > +# Hadoop datanode policy.
> > +#
> > +
> > +allow hadoop_datanode_t self:process signal;
> > +
> > +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +
> > +corenet_sendrecv_hadoop_datanode_client_packets(hadoop_datanode_t)
> > +corenet_sendrecv_hadoop_datanode_server_packets(hadoop_datanode_t)
> > +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_datanode_t)
> > +corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
> > +corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
> > +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> > +
> > +fs_getattr_xattr_fs(hadoop_datanode_t)
> > +
> > +########################################
> > +#
> > +# Hadoop jobtracker policy.
> > +#
> > +
> > +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +
> > +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> > +
> > +corenet_sendrecv_hadoop_datanode_client_packets(hadoop_jobtracker_t)
> > +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_jobtracker_t)
> > +corenet_sendrecv_zope_server_packets(hadoop_jobtracker_t)
> > +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> > +corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
> > +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> > +
> > +########################################
> > +#
> > +# Hadoop namenode policy.
> > +#
> > +
> > +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +
> > +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_namenode_t)
> > +corenet_sendrecv_hadoop_namenode_server_packets(hadoop_namenode_t)
> > +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> > +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> > +
> > +########################################
> > +#
> > +# Hadoop secondary namenode policy.
> > +#
> > +
> > +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +
> > +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_secondarynamenode_t)
> > +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> > +
> > +########################################
> > +#
> > +# Hadoop tasktracker policy.
> > +#
> > +
> > +allow hadoop_tasktracker_t self:process signal;
> > +
> > +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> > +
> > +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> > +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> > +
> > +corenet_sendrecv_hadoop_datanode_client_packets(hadoop_tasktracker_t)
> > +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_tasktracker_t)
> > +corenet_sendrecv_zope_client_packets(hadoop_tasktracker_t)
> > +corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
> > +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> > +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> > +
> > +fs_associate(hadoop_tasktracker_t)
> > +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> > +
> > +########################################
> > +#
> > +# Hadoop zookeeper client policy.
> > +#
> > +
> > +allow zookeeper_t self:process { getsched signal_perms execmem };
> > +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> > +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> > +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> > +allow zookeeper_t self:udp_socket create_socket_perms;
> > +
> > +allow zookeeper_t zookeeper_server_t:process signull;
> > +
> > +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> > +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> > +
> > +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> > +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> > +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> > +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> > +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> > +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> > +
> > +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> > +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
> > +
> > +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> > +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
> > +
> > +can_exec(zookeeper_t, zookeeper_exec_t)
> > +
> > +kernel_read_network_state(zookeeper_t)
> > +kernel_read_system_state(zookeeper_t)
> > +
> > +corecmd_exec_bin(zookeeper_t)
> > +corecmd_exec_shell(zookeeper_t)
> > +
> > +corenet_all_recvfrom_unlabeled(zookeeper_t)
> > +corenet_all_recvfrom_netlabel(zookeeper_t)
> > +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> > +corenet_tcp_bind_all_nodes(zookeeper_t)
> > +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> > +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> > +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> > +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> > +# Hadoop uses high ordered random ports for services
> > +# If permanent ports are chosen, remove line below and lock down
> > +corenet_tcp_connect_generic_port(zookeeper_t)
> > +corenet_udp_bind_all_nodes(zookeeper_t)
> > +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> > +corenet_udp_sendrecv_all_ports(zookeeper_t)
> > +corenet_udp_sendrecv_generic_if(zookeeper_t)
> > +
> > +dev_read_rand(zookeeper_t)
> > +dev_read_sysfs(zookeeper_t)
> > +dev_read_urand(zookeeper_t)
> > +
> > +files_dontaudit_list_default(zookeeper_t)
> > +files_read_etc_files(zookeeper_t)
> > +files_read_usr_files(zookeeper_t)
> > +
> > +miscfiles_read_localization(zookeeper_t)
> > +
> > +sysnet_read_config(zookeeper_t)
> > +
> > +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> > +userdom_use_user_terminals(zookeeper_t)
> > +
> > +java_exec(zookeeper_t)
> > +
> > +optional_policy(`
> > +	nscd_socket_use(zookeeper_t)
> > +')
> > +
> > +########################################
> > +#
> > +# Hadoop zookeeper server policy.
> > +#
> > +
> > +allow zookeeper_server_t self:capability kill;
> > +allow zookeeper_server_t self:process { execmem getsched signal_perms };
> > +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> > +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> > +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> > +allow zookeeper_server_t self:udp_socket create_socket_perms;
> > +
> > +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> > +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> > +
> > +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> > +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> > +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> > +
> > +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> > +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> > +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> > +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> > +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> > +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> > +
> > +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> > +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> > +
> > +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> > +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
> > +
> > +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> > +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
> > +
> > +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> > +
> > +kernel_read_network_state(zookeeper_server_t)
> > +kernel_read_system_state(zookeeper_server_t)
> > +
> > +corecmd_exec_bin(zookeeper_server_t)
> > +corecmd_exec_shell(zookeeper_server_t)
> > +
> > +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> > +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> > +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> > +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> > +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> > +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> > +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> > +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> > +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> > +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> > +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> > +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> > +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> > +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> > +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> > +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> > +# Hadoop uses high ordered random ports for services
> > +# If permanent ports are chosen, remove line below and lock down
> > +corenet_tcp_connect_generic_port(zookeeper_server_t)
> > +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
> > +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
> > +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
> > +corenet_udp_bind_all_nodes(zookeeper_server_t)
> > +
> > +dev_read_rand(zookeeper_server_t)
> > +dev_read_sysfs(zookeeper_server_t)
> > +dev_read_urand(zookeeper_server_t)
> > +
> > +files_read_etc_files(zookeeper_server_t)
> > +files_read_usr_files(zookeeper_server_t)
> > +
> > +fs_getattr_xattr_fs(zookeeper_server_t)
> > +
> > +logging_send_syslog_msg(zookeeper_server_t)
> > +
> > +miscfiles_read_localization(zookeeper_server_t)
> > +
> > +sysnet_read_config(zookeeper_server_t)
> > +
> > +java_exec(zookeeper_server_t)
> > diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> > index f976344..f1e6c9f 100644
> > --- a/policy/modules/system/unconfined.te
> > +++ b/policy/modules/system/unconfined.te
> > @@ -118,6 +118,10 @@ optional_policy(`
> >  ')
> >  
> >  optional_policy(`
> > +	hadoop_run(unconfined_t, unconfined_r)
> > +')
> > +
> > +optional_policy(`
> >  	inn_domtrans(unconfined_t)
> >  ')
> >  
> > @@ -210,6 +214,10 @@ optional_policy(`
> >  	xserver_domtrans(unconfined_t)
> >  ')
> >  
> > +optional_policy(`
> > +	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
> > +')
> > +
> >  ########################################
> >  #
> >  # Unconfined Execmem Local policy
> > 
> > 
> > 
> > _______________________________________________
> > refpolicy mailing list
> > refpolicy at oss.tresys.com
> > http://oss.tresys.com/mailman/listinfo/refpolicy
> 
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20101006/4ed62cfb/attachment-0001.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-10-06 10:25 Dominick Grift
@ 2010-10-06 15:54 ` Paul Nuzzi
  2010-10-06 17:34   ` Dominick Grift
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-10-06 15:54 UTC (permalink / raw)
  To: refpolicy

On 10/06/2010 06:25 AM, Dominick Grift wrote:
> Some more suggested changes. One of which is to not allow hadoop rc script domains and the hadoop domain to write log files (only append).
> I wonder if this revision still works for you.
> I am also still wondering about the file context specification. If i am correct, you state that some of them do not work for one reason or another. We should really try to make them all work else it does not make sense to specify them in the first place.

I've had an issue with /var/zookeeper(/.*) and /var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?.  It seems the kernel file_contexts hits on /var(/.*) => var_t before getting to /var/zookeeper(/.*) => zookeeper_server_var_t.  

I briefly tested your policy on the NameNode/JobTracker/SecondaryNameNode.  It would not work in enforcing mode because you are not allowing the services to write log files. 

Other comments inline.

> Signed-off-by: Dominick Grift <domg472@gmail.com>
> ---
> :100644 100644 2ecdde8... 73163db... M	policy/modules/kernel/corenetwork.te.in
> :100644 100644 cad05ff... d2bc2b1... M	policy/modules/roles/sysadm.te
> :000000 100644 0000000... 5935162... A	policy/modules/services/hadoop.fc
> :000000 100644 0000000... cee7cd5... A	policy/modules/services/hadoop.if
> :000000 100644 0000000... 515d2da... A	policy/modules/services/hadoop.te
> :100644 100644 f976344... f1e6c9f... M	policy/modules/system/unconfined.te
>  policy/modules/kernel/corenetwork.te.in |    5 +
>  policy/modules/roles/sysadm.te          |    8 +
>  policy/modules/services/hadoop.fc       |   55 ++++
>  policy/modules/services/hadoop.if       |  364 +++++++++++++++++++++++++++
>  policy/modules/services/hadoop.te       |  410 +++++++++++++++++++++++++++++++
>  policy/modules/system/unconfined.te     |    8 +
>  6 files changed, 850 insertions(+), 0 deletions(-)
> 
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..73163db 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
>  network_port(git, tcp,9418,s0, udp,9418,s0)
>  network_port(gopher, tcp,70,s0, udp,70,s0)
>  network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_datanode, tcp, 50010,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
>  network_port(hddtemp, tcp,7634,s0)
>  network_port(howl, tcp,5335,s0, udp,5353,s0)
>  network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
>  network_port(xen, tcp,8002,s0)
>  network_port(xfs, tcp,7100,s0)
>  network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
>  network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
>  network_port(zope, tcp,8021,s0)
>  
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index cad05ff..d2bc2b1 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
>  ')
>  
>  optional_policy(`
> +	hadoop_run(sysadm_t, sysadm_r)
> +')
> +
> +optional_policy(`
>  	# allow system administrator to use the ipsec script to look
>  	# at things (e.g., ipsec auto --status)
>  	# probably should create an ipsec_admin role for this kind of thing
> @@ -392,6 +396,10 @@ optional_policy(`
>  	yam_run(sysadm_t, sysadm_r)
>  ')
>  
> +optional_policy(`
> +	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
>  ifndef(`distro_redhat',`
>  	optional_policy(`
>  		auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..5935162
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,55 @@
> +/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +# Why do these regular expresions differ from the ones below (/etc/rc.d/init.d)? Which of the two works best?

These work for Debian

> +/etc/init\.d/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +

These work for Fedora/RHEL.  I haven't fully tested on Debian. Do we want to keep the Debian contexts?  

> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper.*(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid			--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid			--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..cee7cd5
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,364 @@
> +## <summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +## <summary>
> +##	The template to define a hadoop domain.
> +## </summary>
> +## <param name="domain_prefix">
> +##	<summary>
> +##	Domain prefix to be used.
> +##	</summary>
> +## </param>
> +#
> +template(`hadoop_domain_template',`
> +	gen_require(`
> +		attribute hadoop_domain;
> +		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> +		type hadoop_exec_t, hadoop_hsperfdata_t, hadoop_etc_t;
> +	')
> +
> +	########################################
> +	#
> +	# Shared declarations.
> +	#
> +
> +	type hadoop_$1_t, hadoop_domain;
> +	domain_type(hadoop_$1_t)
> +	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> +	type hadoop_$1_initrc_t;
> +	type hadoop_$1_initrc_exec_t;
> +	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> +	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> +	type hadoop_$1_lock_t;
> +	files_lock_file(hadoop_$1_lock_t)
> +
> +	type hadoop_$1_log_t;
> +	logging_log_file(hadoop_$1_log_t)
> +
> +	type hadoop_$1_var_lib_t;
> +	files_type(hadoop_$1_var_lib_t)
> +
> +	type hadoop_$1_initrc_var_run_t;
> +	files_pid_file(hadoop_$1_initrc_var_run_t)
> +
> +	type hadoop_$1_tmp_t;
> +	files_tmp_file(hadoop_$1_tmp_t)
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 initrc policy.
> +	#
> +
> +	allow hadoop_$1_initrc_t self:capability { setuid setgid };
> +	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_initrc_t self:process setsched;
> +
> +	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> +
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> +	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> +	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
> +	logging_search_logs(hadoop_$1_initrc_t)
> +
> +	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> +	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> +	files_search_pids(hadoop_$1_initrc_t)
> +
> +	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> +
> +	can_exec(hadoop_$1_initrc_t, hadoop_etc_t)
> +
> +	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> +	kernel_read_sysctl(hadoop_$1_initrc_t)
> +	kernel_read_system_state(hadoop_$1_initrc_t)
> +
> +	corecmd_exec_bin(hadoop_$1_initrc_t)
> +	corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> +	files_dontaudit_list_default(hadoop_$1_initrc_t)
> +	files_read_etc_files(hadoop_$1_initrc_t)
> +	files_read_usr_files(hadoop_$1_initrc_t)
> +
> +	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> +	init_rw_utmp(hadoop_$1_initrc_t)
> +	init_use_script_ptys(hadoop_$1_initrc_t)
> +
> +	logging_send_audit_msgs(hadoop_$1_initrc_t)
> +	logging_send_syslog_msg(hadoop_$1_initrc_t)
> +
> +	miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> +	term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> +	consoletype_exec(hadoop_$1_initrc_t)
> +
> +	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_initrc_t)
> +	')
> +
> +	####################################
> +	#
> +	# Shared hadoop_$1 policy.
> +	#
> +
> +	allow hadoop_$1_t self:process execmem;
> +	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> +	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> +	allow hadoop_$1_t self:udp_socket create_socket_perms;
> +
> +	allow hadoop_$1_t hadoop_domain:process signull;
> +
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> +	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, { file dir })
> +	files_search_var_lib(hadoop_$1_t)
> +
> +	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
> +
> +	append_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	create_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	read_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	setattr_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> +	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
> +	logging_search_logs(hadoop_$1_t)
> +
> +	allow hadoop_$1_t hadoop_var_run_t:dir getattr_dir_perms;
> +	files_search_pids(hadoop_$1_t)
> +
> +	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> +	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
> +
> +	can_exec(hadoop_$1_t, hadoop_etc_t)
> +
> +	kernel_read_network_state(hadoop_$1_t)
> +	kernel_read_system_state(hadoop_$1_t)
> +
> +	corecmd_exec_bin(hadoop_$1_t)
> +	corecmd_exec_shell(hadoop_$1_t)
> +
> +	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> +	corenet_all_recvfrom_netlabel(hadoop_$1_t)
> +	corenet_tcp_bind_all_nodes(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> +	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> +	# Hadoop uses high ordered random ports for services
> +	# If permanent ports are chosen, remove line below and lock down
> +	corenet_tcp_connect_generic_port(hadoop_$1_t)
> +	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> +	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> +	corenet_udp_bind_all_nodes(hadoop_$1_t)
> +
> +	dev_read_rand(hadoop_$1_t)
> +	dev_read_urand(hadoop_$1_t)
> +	dev_read_sysfs(hadoop_$1_t)
> +
> +	files_read_etc_files(hadoop_$1_t)
> +
> +	miscfiles_read_localization(hadoop_$1_t)
> +
> +	sysnet_read_config(hadoop_$1_t)
> +
> +	java_exec(hadoop_$1_t)
> +
> +	optional_policy(`
> +		nscd_socket_use(hadoop_$1_t)
> +	')
> +')
> +
> +########################################
> +## <summary>
> +##	Execute hadoop in the
> +##	hadoop domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_domtrans',`
> +	gen_require(`
> +		type hadoop_t, hadoop_exec_t;
> +	')
> +
> +	libs_search_lib($1)
> +	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute hadoop in the hadoop domain,
> +##	and allow the specified role the
> +##	hadoop domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +## <param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`hadoop_run',`
> +	gen_require(`
> +		type hadoop_t;
> +	')
> +
> +	hadoop_domtrans($1)
> +	role $2 types hadoop_t;
> +
> +	allow $1 hadoop_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_domtrans_zookeeper_client',`
> +	gen_require(`
> +		type zookeeper_t, zookeeper_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper server in the
> +##	zookeeper server domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_domtrans_zookeeper_server',`
> +	gen_require(`
> +		type zookeeper_server_t, zookeeper_server_exec_t;
> +	')
> +
> +	corecmd_search_bin($1)
> +	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper server in the
> +##	zookeeper domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_zookeeper_initrc_domtrans_server',`
> +	gen_require(`
> +		type zookeeper_server_initrc_exec_t;
> +	')
> +
> +	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute zookeeper client in the
> +##	zookeeper client domain, and allow the
> +##	specified role the zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed to transition.
> +##	</summary>
> +## </param>
> +## <param name="role">
> +##	<summary>
> +##	Role allowed access.
> +##	</summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`hadoop_zookeeper_run_client',`
> +	gen_require(`
> +		type zookeeper_t;
> +	')
> +
> +	hadoop_domtrans_zookeeper_client($1)
> +	role $2 types zookeeper_t;
> +
> +	allow $1 zookeeper_t:process { ptrace signal_perms };
> +	ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Read hadoop configuration files.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed access.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_read_config_files',`
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	files_search_etc($1)
> +	read_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
> +	read_lnk_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
> +')
> +
> +########################################
> +## <summary>
> +##	Execute hadoop configuration files.
> +## </summary>
> +## <param name="domain">
> +##	<summary>
> +##	Domain allowed access.
> +##	</summary>
> +## </param>
> +#
> +interface(`hadoop_exec_config_files',`
> +	gen_require(`
> +		type hadoop_etc_t;
> +	')
> +
> +	files_search_etc($1)
> +	allow $1 hadoop_etc_t:dir search_dir_perms;
> +	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
> +	can_exec($1, hadoop_etc_t)
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..515d2da
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,410 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +type hadoop_hsperfdata_t;
> +files_tmp_file(hadoop_hsperfdata_t)
> +ubac_constrained(hadoop_hsperfdata_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { signal_perms setrlimit execmem };
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +
> +allow hadoop_t hadoop_domain:process signull;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +# not sure:
> +files_search_var_lib(hadoop_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +# not sure:
> +logging_search_logs(hadoop_t)
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +# not sure:
> +files_search_pids(hadoop_t)

I don't think you need files_search_var_lib, logging_search_logs or files_search_pids.

> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(hadoop_t)
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_list_default(hadoop_t)
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +# Seems a bit coarse
> +files_read_all_files(hadoop_t)

Needed for "hadoop fs -put" which adds any file to the distributed file system.
Is there a finer grain alternative?

> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> +	nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> +	nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +allow hadoop_datanode_t self:process signal;
> +
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +corenet_sendrecv_hadoop_datanode_client_packets(hadoop_datanode_t)
> +corenet_sendrecv_hadoop_datanode_server_packets(hadoop_datanode_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_datanode_t)
> +corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +
> +corenet_sendrecv_hadoop_datanode_client_packets(hadoop_jobtracker_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_jobtracker_t)
> +corenet_sendrecv_zope_server_packets(hadoop_jobtracker_t)
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_namenode_t)
> +corenet_sendrecv_hadoop_namenode_server_packets(hadoop_namenode_t)
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_secondarynamenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +
> +corenet_sendrecv_hadoop_datanode_client_packets(hadoop_tasktracker_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_tasktracker_t)
> +corenet_sendrecv_zope_client_packets(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +
> +fs_associate(hadoop_tasktracker_t)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched signal_perms execmem };
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
> +
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(zookeeper_t)
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_dontaudit_list_default(zookeeper_t)
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> +	nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched signal_perms };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_server_t self:udp_socket create_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
> +
> +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_udp_bind_all_nodes(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..f1e6c9f 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
>  ')
>  
>  optional_policy(`
> +	hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
>  	inn_domtrans(unconfined_t)
>  ')
>  
> @@ -210,6 +214,10 @@ optional_policy(`
>  	xserver_domtrans(unconfined_t)
>  ')
>  
> +optional_policy(`
> +	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
>  ########################################
>  #
>  # Unconfined Execmem Local policy
> 
> 
> 
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-10-06 10:25 Dominick Grift
  2010-10-06 15:54 ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-10-06 10:25 UTC (permalink / raw)
  To: refpolicy

Some more suggested changes. One of them is to not allow the hadoop rc script domains and the hadoop domain to write log files (only append to them).
I wonder if this revision still works for you.
I am also still wondering about the file context specifications. If I am correct, you state that some of them do not work for one reason or another. We should really try to make them all work; otherwise it does not make sense to specify them in the first place.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 73163db... M	policy/modules/kernel/corenetwork.te.in
:100644 100644 cad05ff... d2bc2b1... M	policy/modules/roles/sysadm.te
:000000 100644 0000000... 5935162... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... cee7cd5... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 515d2da... A	policy/modules/services/hadoop.te
:100644 100644 f976344... f1e6c9f... M	policy/modules/system/unconfined.te
 policy/modules/kernel/corenetwork.te.in |    5 +
 policy/modules/roles/sysadm.te          |    8 +
 policy/modules/services/hadoop.fc       |   55 ++++
 policy/modules/services/hadoop.if       |  364 +++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  410 +++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 +
 6 files changed, 850 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..73163db 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_datanode, tcp, 50010,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..d2bc2b1 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5935162
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,55 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+# Why do these regular expresions differ from the ones below (/etc/rc.d/init.d)? Which of the two works best?
+/etc/init\.d/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper.*(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid			--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid			--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..cee7cd5
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,364 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t, hadoop_hsperfdata_t, hadoop_etc_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	can_exec(hadoop_$1_initrc_t, hadoop_etc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	files_dontaudit_list_default(hadoop_$1_initrc_t)
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	consoletype_exec(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t self:process execmem;
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, { file dir })
+	files_search_var_lib(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+
+	append_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_t)
+
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr_dir_perms;
+	files_search_pids(hadoop_$1_t)
+
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+	can_exec(hadoop_$1_t, hadoop_etc_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	# Hadoop uses high ordered random ports for services
+	# If permanent ports are chosen, remove line below and lock down
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+
+	files_read_etc_files(hadoop_$1_t)
+
+	miscfiles_read_localization(hadoop_$1_t)
+
+	sysnet_read_config(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	hadoop_domtrans_zookeeper_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Read hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_read_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	read_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+	read_lnk_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
+	can_exec($1, hadoop_etc_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..515d2da
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,410 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { signal_perms setrlimit execmem };
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+allow hadoop_t hadoop_domain:process signull;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+# not sure:
+files_search_var_lib(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+# not sure:
+logging_search_logs(hadoop_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+# not sure:
+files_search_pids(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_list_default(hadoop_t)
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+# Seems a bit coarse
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_datanode_server_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_datanode_t)
+corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+
+fs_getattr_xattr_fs(hadoop_datanode_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_zope_server_packets(hadoop_jobtracker_t)
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_namenode_t)
+corenet_sendrecv_hadoop_namenode_server_packets(hadoop_namenode_t)
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_secondarynamenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_zope_client_packets(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+
+fs_associate(hadoop_tasktracker_t)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched signal_perms execmem };
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_dontaudit_list_default(zookeeper_t)
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched signal_perms };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..f1e6c9f 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy
-- 
1.7.2.3

-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20101006/82b17324/attachment-0001.bin 

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-10-06 10:06 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-10-06 10:06 UTC (permalink / raw)
  To: refpolicy

I tried to clean up the latest revision a bit. There are also a few things that leave me wondering, most of which I have commented on.

My take on hadoop.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 73163db... M	policy/modules/kernel/corenetwork.te.in
:100644 100644 cad05ff... d2bc2b1... M	policy/modules/roles/sysadm.te
:000000 100644 0000000... 5935162... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 69519f0... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 6a79d31... A	policy/modules/services/hadoop.te
:100644 100644 f976344... f1e6c9f... M	policy/modules/system/unconfined.te
 policy/modules/kernel/corenetwork.te.in |    5 +
 policy/modules/roles/sysadm.te          |    8 +
 policy/modules/services/hadoop.fc       |   55 ++++
 policy/modules/services/hadoop.if       |  358 +++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  411 +++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 +
 6 files changed, 845 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..73163db 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_datanode, tcp, 50010,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..d2bc2b1 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5935162
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,55 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+# Why do these regular expressions differ from the ones below (/etc/rc.d/init.d)? Which of the two works best?
+/etc/init\.d/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper.*(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid			--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid			--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..69519f0
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,358 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t, hadoop_hsperfdata_t, hadoop_etc_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+	# Declares the hadoop_$1_t daemon domain, a matching init script
+	# domain, and private lock, log, lib, pid and tmp file types.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, { dir file })
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	# Run the shared hadoop wrapper in the per-service daemon domain.
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	# /etc/hadoop* ships executable shell fragments (e.g. hadoop-env.sh).
+	can_exec(hadoop_$1_initrc_t, hadoop_etc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	files_dontaudit_list_default(hadoop_$1_initrc_t)
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	consoletype_exec(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t self:process execmem;
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, { file dir })
+	files_search_var_lib(hadoop_$1_t)
+
+	# JVM hsperfdata directory shared by all hadoop domains under /tmp.
+	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_t)
+
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr_dir_perms;
+	files_search_pids(hadoop_$1_t)
+
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+	can_exec(hadoop_$1_t, hadoop_etc_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	# Hadoop uses high ordered random ports for services
+	# If permanent ports are chosen, remove line below and lock down
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+
+	files_read_etc_files(hadoop_$1_t)
+
+	miscfiles_read_localization(hadoop_$1_t)
+
+	sysnet_read_config(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	# The hadoop entry point is installed under /usr/lib/hadoop*/bin,
+	# so callers need search on lib directories (not bin) to reach it.
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	# Let the caller signal, trace and inspect its hadoop processes.
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	# zookeeper-client is installed in a standard bin directory.
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	# zookeeper-server is installed in a standard bin directory.
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute the zookeeper server init script
+##	in the zookeeper server init script domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	hadoop_domtrans_zookeeper_client($1)
+	role $2 types zookeeper_t;
+
+	# Let the caller signal, trace and inspect its zookeeper clients.
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Read hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_read_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	read_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+	# Config entries may be symlinks (e.g. /etc/hadoop alternatives).
+	read_lnk_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
+	# The config dir ships executable shell fragments (e.g. hadoop-env.sh).
+	can_exec($1, hadoop_etc_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..6a79d31
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,411 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+# Domain for the hadoop command-line client.
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# Shared JVM hsperfdata directory under /tmp, used by the hadoop
+# and zookeeper domains alike.
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+# Instantiate one daemon + init script domain pair per hadoop service.
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { signal_perms setrlimit execmem };
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+allow hadoop_t hadoop_domain:process signull;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+# not sure:
+files_search_var_lib(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+# not sure:
+logging_search_logs(hadoop_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+# not sure:
+files_search_pids(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_list_default(hadoop_t)
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+# NOTE(review): read access to all file types is very coarse and largely
+# defeats read confinement for hadoop_t; determine which labels the
+# client actually needs and replace this with targeted rules.
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_datanode_server_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_datanode_t)
+corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+
+fs_getattr_xattr_fs(hadoop_datanode_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_zope_server_packets(hadoop_jobtracker_t)
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_namenode_t)
+corenet_sendrecv_hadoop_namenode_server_packets(hadoop_namenode_t)
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_secondarynamenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+# Tasktracker creates and relabels its own log subdirectories.
+# Note: no trailing semicolon -- the pattern macro already expands to
+# fully terminated rules; a stray ";" yields an empty statement.
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t)
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_zope_client_packets(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+
+fs_associate(hadoop_tasktracker_t)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched signal_perms execmem };
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_dontaudit_list_default(zookeeper_t)
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched signal_perms };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..f1e6c9f 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy
-- 
1.7.2.3

-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20101006/8a1f9c21/attachment-0001.bin 

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-23 14:53 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-23 14:53 UTC (permalink / raw)
  To: refpolicy

Fixed the required exec type for hadoop in hadoop_domtrans.

Moved some rules from the rc domains to the main domains, as I suspect it is not the rc scripts that require them.

Maybe some other trivial changes.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 5c66ae4... A	policy/modules/services/hadoop.if
:000000 100644 0000000... e947a6b... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   40 ++++
 policy/modules/services/hadoop.if       |  241 +++++++++++++++++++++
 policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
 4 files changed, 632 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..d88b5ff
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,40 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode	--	gen_context(system_u:object_r:hadoop_datanode_initrc_lock_t,s0)
+/var/lock/subsys/hadoop-namenode	--	gen_context(system_u:object_r:hadoop_namenode_initrc_lock_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop								-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-datanode\.pid	--	gen_context(system_u:object_r:hadoop_datanode_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-namenode\.pid	--	gen_context(system_u:object_r:hadoop_namenode_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..5c66ae4
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,241 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	# Daemon domain for the $1 service; entered from its rc script
+	# domain through hadoop_exec_t (see domtrans_pattern below).
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	# Both the rc script domain and the daemon domain run under system_r.
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	# rc script starts the daemon: hadoop_$1_initrc_t -> hadoop_exec_t -> hadoop_$1_t
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	# Hadoop daemons probe other hadoop domains for existence (null signal).
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	append_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_t)
+
+	allow hadoop_$1_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t;
+		type hadoop_exec_t;
+	')
+
+	libs_search_lib($1)
+	files_search_usr($1)
+
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+
+	role $2 types hadoop_t;
+	ps_process_pattern($1, hadoop_t)
+	allow $1 hadoop_t:process { signal_perms ptrace };
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t;
+		type zookeeper_exec_t;
+	')
+
+	files_search_usr($1)
+	corecmd_search_bin($1)
+
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t;
+		type zookeeper_server_exec_t;
+	')
+
+	files_search_usr($1)
+	corecmd_search_bin($1)
+
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	# Execute the labeled init script, transitioning to the initrc domain
+	# (e.g. for run_init / direct service management by $1).
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+
+	role $2 types zookeeper_t;
+	ps_process_pattern($1, zookeeper_t)
+	allow $1 zookeeper_t:process { signal_perms ptrace };
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..e947a6b
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,347 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+# Attribute shared by every per-service hadoop domain declared
+# through hadoop_domain_template() below.
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+# One daemon + initrc domain pair per hadoop service
+# (see hadoop_domain_template in hadoop.if).
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+# Log type shared by the client and server domains
+# (both create/append under it below).
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+# Server data store, created under /var/lib (see files_var_lib_filetrans).
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+# NOTE(review): can_exec on the config type implies executable scripts
+# under /etc (e.g. env scripts) -- confirm which files need this.
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+# Existence probe (null signal) of the hadoop daemon domains.
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+# NOTE(review): the zope port type is reused here -- presumably it overlaps
+# a hadoop service port number; verify or declare a dedicated port type.
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+# NOTE(review): files_read_all_files is very broad (read access to every
+# file type on the system); consider narrowing to the types hadoop needs.
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+# Existence probe (null signal) of the zookeeper server process.
+allow zookeeper_t zookeeper_server_t:process signull;
+
+# The client may re-execute its own entry point.
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+# The server may re-execute its own entry point.
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+# Quorum traffic: servers both bind and connect on the election and
+# leader ports when talking to their peers.
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_server_t)
+')
-- 
1.7.2.3

-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100923/050f6165/attachment-0001.bin 

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-23 13:54         ` Paul Nuzzi
@ 2010-09-23 14:40           ` Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-23 14:40 UTC (permalink / raw)
  To: refpolicy

On 09/23/2010 03:54 PM, Paul Nuzzi wrote:
> On 09/21/2010 01:08 PM, Dominick Grift wrote:
>> On 09/21/2010 06:34 PM, Paul Nuzzi wrote:
>>> On 09/21/2010 12:14 PM, Dominick Grift wrote:
>>>> On 09/21/2010 05:42 PM, Paul Nuzzi wrote:
>>>>> On 09/21/2010 05:02 AM, Dominick Grift wrote:
>>>>>> Well I've rewritten the policy as much as I can with the information that I currently have.
>>>>>> Because of the use of the hadoop domain attributes i cannot determine whether it is the initrc script doing something or the application, and so i cannot currently finish the hadoop_domain_template policy.
>>>>>
>>>>> The hadoop_domain policy is basic stuff that most programs share, plus a few hadoop specific things.  I initially had separate functions for initrc and hadoop type policy.
>>>>> Since we are not exporting hadoop specific functionality to other modules I removed them from the .if file. 
>>>>
>>>> With that in mind, it looks like the policy has some duplicate rules.
>>>>
>>>>>> Also i have no clue what transitions to the hadoop_t domain. It does not own an initrc script so i gather it is no init daemon domain. Must be an application domain then?
>>>>>> A lot of other things that arent, clear and/ or make no sense.
>>>>>> I have also left out things that i think, should be handled differently.
>>>>>
>>>>> hadoop_t is for the hadoop executable which is /usr/bin/hadoop.  It does basic file system stuff, submits jobs and administers the cluster.
>>>>
>>>> And who what runs it? who or/and what transitions to the hadoop_t domain?
>>>
>>> All of the users run it.  Users and the sysadm need to transition to the hadoop_t domain.
>>
>> ok so you can transition to it by creating a custom module with the
>> following:
>>
>> hadoop_run(sysadm_t, sysadm_r)
>>
>> Can you confirm that this works?
> 
> They are transitioning correctly for sysadm_u.

Thanks
> 
>>>>>> It would be cool if someone could test this policy and provide feedback in the shape of avc denials.
>>>>>
>>>>> I was able to get zookeeper server and client to run.  Here is the audit2allow in permissive mode.  Ignore the networking avcs.  I didn't port the networking functions since it was built as a module.
>>>>> Zookeeper client doesn't domtrans into a domain.  There is an semodule insert error. hadoop_tasktracker_data_t needs to be modified.
>>>>
>>>> Thanks i fixed that file context specification now.
>>>>
>>>> Were you able to run the init script domains in permissive mode? Does it
>>>> work when you use run_init? Do the initrc domains properly transition to
>>>> the main domains in permissive mode?
>>>  
>>> None of the pseudo initrc domains transitioned to the target domain using run_init.  
>>
>> Any avc denials related to this? Because the domain transitions are
>> specified in policy (example: hadoop_datanode_initrc_t -> hadoop_exec_t
>> -> hadoop_datanode_t)
>>
>>>
>>>> Could you provides some avc denials of that?
>>
>>> There doesn't seem to be any denials for the domtrans.
>>
>> So the domain transition does not occur but no avc denials are showed?
>> that is strange.
>> maybe semodule -DB will expose some related information. Also check for
>> SELINUX_ERR (grep -i SELINUX_ERR /var/log/audit/audit.log)
>>
>> Are the executables properly labelled?
> 
> I don't know if you changed anything with the new patch but they seem to be transitioning correctly.
> I added a separate module with hadoop_run(sysadm_t, sysadm_r) and hadoop_run(unconfined_t, unconfined_r).
> To get it to compile you need to add a gen_require hadoop_exec_t to hadoop_domtrans.  

Thanks fixed the hadoop_exec_t requirement. I did change some trivial
things, not sure if they are related to it though.

> 
>>>
>>>> You should also specify file contexts for the pid files and lock files.
>>>
>>> system_u:system_r:hadoop_datanode_initrc_t:s0 0 S 489 3125 1  1 80 0 - 579640 futex_ ?   00:00:02 java
>>> system_u:system_r:hadoop_namenode_initrc_t:s0 0 S 489 3376 1  2 80 0 - 581189 futex_ ?   00:00:02 java
>>> system_u:system_r:zookeeper_server_t:s0 0 S 488 3598 1  0 80   0 - 496167 futex_ ?       00:00:00 java
>>>
>>> -rw-r--r--. hadoop hadoop system_u:object_r:hadoop_datanode_var_run_t:s0 hadoop-hadoop-datanode.pid
>>> -rw-r--r--. hadoop hadoop system_u:object_r:hadoop_namenode_var_run_t:s0 hadoop-hadoop-namenode.pid
>>
>>>
>>> -rw-r--r--. root root system_u:object_r:hadoop_datanode_initrc_lock_t:s0 /var/lock/subsys/hadoop-datanode
>>> -rw-r--r--. root root system_u:object_r:hadoop_namenode_initrc_lock_t:s0 /var/lock/subsys/hadoop-namenode
>>>
>>>>>
>>>>> #============= zookeeper_server_t ==============
>>>>> allow zookeeper_server_t java_exec_t:file { read getattr open execute execute_no_trans };
>>>>> allow zookeeper_server_t net_conf_t:file { read getattr open };
>>>>> allow zookeeper_server_t port_t:tcp_socket { name_bind name_connect };
>>>>
>>>> What port is it connecting and binding sockets to? Why are they not
>>>> labelled?
>>>
>>> I left out the networking since I built it as a module.  I haven't had luck running refpolicy on Fedora.  The corenet_* functions might need to be written if refpolicy doesn't want all the ports permanently defined.
>>
>> You could patch fedoras selinux-policy rpm that is what i usually do.
>> Anyways i will just assume its the ports we declared.
>>>
>>>>> allow zookeeper_server_t self:process execmem;
>>>>> allow zookeeper_server_t self:tcp_socket { setopt read bind create accept write getattr connect shutdown listen };
>>>>>
>>>>
>>>> I will add the above rules to the policy that i have, except for the
>>>> bind/connect to generic port types as this seems like a bad idea to me.
>>>
>>> I think I left out binding to generic ports in my policy.
>>>
>>>> Were there no denials left for the zookeeper client? Did you use
>>>> zookeeper_run_client() to transition to the zookeeper_t domain?
>>>
>>> zookeeper_client transitioned to the unconfined_java_t domain so there were no denials. I ran your patched policy without any modifications.  
>>>
>>
>> Because you probably ran is in the unconfined domain. You should use the
>> zookeeper_run_client() that my patch provides, so that you can
>> transition to the confined domain.
> 
> Looks like that transitions correctly when I add zookeeper_run_client(unconfined_t, unconfined_r).
> Thanks for taking an interest in the patch.  How do we want to merge your changes with mine?

Honestly i think there are some mistakes in your version, and i suspect
that i adopted some of those mistakes into my patch.

For example. in my patch i allow the hadoop rc scripts to create pid
files. but from your feedback i strongly suspect that the rc scripts do
not create the pid files:

>>> -rw-r--r--. hadoop hadoop
system_u:object_r:hadoop_datanode_var_run_t:s0 hadoop-hadoop-datanode.pid

This shows that a process running as the hadoop user created the hadoop
datanode pid file. If the rc script would have created it , it wouldnt
be hadoop but root i suspect.

My opinion is that it is best to start over and use my patch as a clean
base.

Then we can extend my patch with any raw AVC denials.

I will post a new patch soon with the hadoop_domtrans fix and with the
pid filetrans removed for rc script domains (and maybe remove some other
things that i dont fully trust.)

Just so you know, we also hang out on IRC. That medium is a bit better
for collaboration/ interactivity than maillists, especially with the
larger projects.

>  
>> I will add file context specifications for the locks and pids you have
>> reported.
>>
>>>>>> Some properties of this policy:
>>>>>>
>>>>>> The hadoop init script domains must be started by the system, or by unconfined or sysadm_t by using run_init server <hadoop service>
>>>>>> To use the zookeeper client domain, the zookeeper_run_client domain must be called for a domain. (for example if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)
>>>>>> The zookeeper server seems to be an ordinary init daemon domain.
>>>>>> Since i do not know what kind of domain hadoop_t is, it is currently pretty much unreachable. I have created an hadoop_domtrans interface that can be called but currently no role is allowed the hadoop_t domain.
>>>>>>
>>>>>> Signed-off-by: Dominick Grift <domg472@gmail.com>
>>>>>> _______________________________________________
>>>>>> refpolicy mailing list
>>>>>> refpolicy at oss.tresys.com
>>>>>> http://oss.tresys.com/mailman/listinfo/refpolicy
>>>>
>>>>
>>>>
>>>>
>>>> _______________________________________________
>>>> refpolicy mailing list
>>>> refpolicy at oss.tresys.com
>>>> http://oss.tresys.com/mailman/listinfo/refpolicy
>>>
>>
>>
> 


-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 261 bytes
Desc: OpenPGP digital signature
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100923/287ad29b/attachment.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21 17:08       ` Dominick Grift
@ 2010-09-23 13:54         ` Paul Nuzzi
  2010-09-23 14:40           ` Dominick Grift
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-23 13:54 UTC (permalink / raw)
  To: refpolicy

On 09/21/2010 01:08 PM, Dominick Grift wrote:
> On 09/21/2010 06:34 PM, Paul Nuzzi wrote:
>> On 09/21/2010 12:14 PM, Dominick Grift wrote:
>>> On 09/21/2010 05:42 PM, Paul Nuzzi wrote:
>>>> On 09/21/2010 05:02 AM, Dominick Grift wrote:
>>>>> Well ive rewritten the policy as much as i ca with the information that i currently have.
>>>>> Because of the use of the hadoop domain attributes i cannot determine whether it is the initrc script doing something or the application, and so i cannot currently finish the hadoop_domain_template policy.
>>>>
>>>> The hadoop_domain policy is basic stuff that most programs share, plus a few hadoop specific things.  I initially had separate functions for initrc and hadoop type policy.
>>>> Since we are not exporting hadoop specific functionality to other modules I removed them from the .if file. 
>>>
>>> With that in mind, it looks like the policy has some duplicate rules.
>>>
>>>>> Also i have no clue what transitions to the hadoop_t domain. It does not own an initrc script so i gather it is no init daemon domain. Must be an application domain then?
>>>>> A lot of other things that arent, clear and/ or make no sense.
>>>>> I have also left out things that i think, should be handled differently.
>>>>
>>>> hadoop_t is for the hadoop executable which is /usr/bin/hadoop.  It does basic file system stuff, submits jobs and administers the cluster.
>>>
>>> And who what runs it? who or/and what transitions to the hadoop_t domain?
>>
>> All of the users run it.  Users and the sysadm need to transition to the hadoop_t domain.
> 
> ok so you can transition to it by creating a custom module with the
> following:
> 
> hadoop_run(sysadm_t, sysadm_r)
> 
> Can you confirm that this works?

They are transitioning correctly for sysadm_u.

>>>>> It would be cool if someone could test this policy and provide feedback in the shape of avc denials.
>>>>
>>>> I was able to get zookeeper server and client to run.  Here is the audit2allow in permissive mode.  Ignore the networking avcs.  I didn't port the networking functions since it was built as a module.
>>>> Zookeeper client doesn't domtrans into a domain.  There is an semodule insert error. hadoop_tasktracker_data_t needs to be modified.
>>>
>>> Thanks i fixed that file context specification now.
>>>
>>> Were you able to run the init script domains in permissive mode? Does it
>>> work when you use run_init? Do the initrc domains properly transition to
>>> the main domains in permissive mode?
>>  
>> None of the pseudo initrc domains transitioned to the target domain using run_init.  
> 
> Any avc denials related to this? Because the domain transitions are
> specified in policy (example: hadoop_datanode_initrc_t -> hadoop_exec_t
> -> hadoop_datanode_t)
> 
>>
>>> Could you provides some avc denials of that?
> 
>> There doesn't seem to be any denials for the domtrans.
> 
> So the domain transition does not occur but no avc denials are showed?
> that is strange.
> maybe semodule -DB will expose some related information. Also check for
> SELINUX_ERR (grep -i SELINUX_ERR /var/log/audit/audit.log)
> 
> Are the executables properly labelled?

I don't know if you changed anything with the new patch but they seem to be transitioning correctly.
I added a separate module with hadoop_run(sysadm_t, sysadm_r) and hadoop_run(unconfined_t, unconfined_r).
To get it to compile you need to add a gen_require hadoop_exec_t to hadoop_domtrans.  

>>
>>> You should also specify file contexts for the pid files and lock files.
>>
>> system_u:system_r:hadoop_datanode_initrc_t:s0 0 S 489 3125 1  1 80 0 - 579640 futex_ ?   00:00:02 java
>> system_u:system_r:hadoop_namenode_initrc_t:s0 0 S 489 3376 1  2 80 0 - 581189 futex_ ?   00:00:02 java
>> system_u:system_r:zookeeper_server_t:s0 0 S 488 3598 1  0 80   0 - 496167 futex_ ?       00:00:00 java
>>
>> -rw-r--r--. hadoop hadoop system_u:object_r:hadoop_datanode_var_run_t:s0 hadoop-hadoop-datanode.pid
>> -rw-r--r--. hadoop hadoop system_u:object_r:hadoop_namenode_var_run_t:s0 hadoop-hadoop-namenode.pid
> 
>>
>> -rw-r--r--. root root system_u:object_r:hadoop_datanode_initrc_lock_t:s0 /var/lock/subsys/hadoop-datanode
>> -rw-r--r--. root root system_u:object_r:hadoop_namenode_initrc_lock_t:s0 /var/lock/subsys/hadoop-namenode
>>
>>>>
>>>> #============= zookeeper_server_t ==============
>>>> allow zookeeper_server_t java_exec_t:file { read getattr open execute execute_no_trans };
>>>> allow zookeeper_server_t net_conf_t:file { read getattr open };
>>>> allow zookeeper_server_t port_t:tcp_socket { name_bind name_connect };
>>>
>>> What port is it connecting and binding sockets to? Why are they not
>>> labelled?
>>
>> I left out the networking since I built it as a module.  I haven't had luck running refpolicy on Fedora.  The corenet_* functions might need to be written if refpolicy doesn't want all the ports permanently defined.
> 
> You could patch fedoras selinux-policy rpm that is what i usually do.
> Anyways i will just assume its the ports we declared.
>>
>>>> allow zookeeper_server_t self:process execmem;
>>>> allow zookeeper_server_t self:tcp_socket { setopt read bind create accept write getattr connect shutdown listen };
>>>>
>>>
>>> I will add the above rules to the policy that i have, except for the
>>> bind/connect to generic port types as this seems like a bad idea to me.
>>
>> I think I left out binding to generic ports in my policy.
>>
>>> Were there no denials left for the zookeeper client? Did you use
>>> zookeeper_run_client() to transition to the zookeeper_t domain?
>>
>> zookeeper_client transitioned to the unconfined_java_t domain so there were no denials. I ran your patched policy without any modifications.  
>>
> 
> Because you probably ran is in the unconfined domain. You should use the
> zookeeper_run_client() that my patch provides, so that you can
> transition to the confined domain.

Looks like that transitions correctly when I add zookeeper_run_client(unconfined_t, unconfined_r).
Thanks for taking an interest in the patch.  How do we want to merge your changes with mine?
 
> I will add file context specifications for the locks and pids you have
> reported.
> 
>>>>> Some properties of this policy:
>>>>>
>>>>> The hadoop init script domains must be started by the system, or by unconfined or sysadm_t by using run_init server <hadoop service>
>>>>> To use the zookeeper client domain, the zookeeper_run_client domain must be called for a domain. (for example if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)
>>>>> The zookeeper server seems to be an ordinary init daemon domain.
>>>>> Since i do not know what kind of dommain hadoop_t is, it is currently pretty much unreachable. I have created an hadoop_domtrans interface that can be called but currently no role is allowed the hadoop_t domain.
>>>>>
>>>>> Signed-off-by: Dominick Grift <domg472@gmail.com>
>>>>> _______________________________________________
>>>>> refpolicy mailing list
>>>>> refpolicy at oss.tresys.com
>>>>> http://oss.tresys.com/mailman/listinfo/refpolicy
>>>
>>>
>>>
>>>
>>> _______________________________________________
>>> refpolicy mailing list
>>> refpolicy at oss.tresys.com
>>> http://oss.tresys.com/mailman/listinfo/refpolicy
>>
> 
> 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21 16:34     ` Paul Nuzzi
  2010-09-21 17:08       ` Dominick Grift
@ 2010-09-21 19:55       ` Jeremy Solt
  1 sibling, 0 replies; 37+ messages in thread
From: Jeremy Solt @ 2010-09-21 19:55 UTC (permalink / raw)
  To: refpolicy


> >> The hadoop_domain policy is basic stuff that most programs share, plus a few hadoop specific things.  I initially had separate functions for initrc and hadoop type policy.
> >> Since we are not exporting hadoop specific functionality to other modules I removed them from the .if file. 
> > 
Could you send an updated patch so I can see these changes? 


-- 
Jeremy J. Solt
Tresys Technology, LLC
410-290-1411 x122

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21 16:34     ` Paul Nuzzi
@ 2010-09-21 17:08       ` Dominick Grift
  2010-09-23 13:54         ` Paul Nuzzi
  2010-09-21 19:55       ` Jeremy Solt
  1 sibling, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-21 17:08 UTC (permalink / raw)
  To: refpolicy

On 09/21/2010 06:34 PM, Paul Nuzzi wrote:
> On 09/21/2010 12:14 PM, Dominick Grift wrote:
>> On 09/21/2010 05:42 PM, Paul Nuzzi wrote:
>>> On 09/21/2010 05:02 AM, Dominick Grift wrote:
>>>> Well I've rewritten the policy as much as I can with the information that I currently have.
>>>> Because of the use of the hadoop domain attributes i cannot determine whether it is the initrc script doing something or the application, and so i cannot currently finish the hadoop_domain_template policy.
>>>
>>> The hadoop_domain policy is basic stuff that most programs share, plus a few hadoop specific things.  I initially had separate functions for initrc and hadoop type policy.
>>> Since we are not exporting hadoop specific functionality to other modules I removed them from the .if file. 
>>
>> With that in mind, it looks like the policy has some duplicate rules.
>>
>>>> Also i have no clue what transitions to the hadoop_t domain. It does not own an initrc script so i gather it is no init daemon domain. Must be an application domain then?
>>>> A lot of other things that aren't clear and/or make no sense.
>>>> I have also left out things that i think, should be handled differently.
>>>
>>> hadoop_t is for the hadoop executable which is /usr/bin/hadoop.  It does basic file system stuff, submits jobs and administers the cluster.
>>
>> And who or what runs it? Who or what transitions to the hadoop_t domain?
> 
> All of the users run it.  Users and the sysadm need to transition to the hadoop_t domain.

ok so you can transition to it by creating a custom module with the
following:

hadoop_run(sysadm_t, sysadm_r)

Can you confirm that this works?

>>>> It would be cool if someone could test this policy and provide feedback in the shape of avc denials.
>>>
>>> I was able to get zookeeper server and client to run.  Here is the audit2allow in permissive mode.  Ignore the networking avcs.  I didn't port the networking functions since it was built as a module.
>>> Zookeeper client doesn't domtrans into a domain.  There is an semodule insert error. hadoop_tasktracker_data_t needs to be modified.
>>
>> Thanks i fixed that file context specification now.
>>
>> Were you able to run the init script domains in permissive mode? Does it
>> work when you use run_init? Do the initrc domains properly transition to
>> the main domains in permissive mode?
>  
> None of the pseudo initrc domains transitioned to the target domain using run_init.  

Any avc denials related to this? Because the domain transitions are
specified in policy (example: hadoop_datanode_initrc_t -> hadoop_exec_t
-> hadoop_datanode_t)

> 
>>> Could you provide some avc denials of that?

> There doesn't seem to be any denials for the domtrans.

So the domain transition does not occur but no avc denials are shown?
That is strange.
maybe semodule -DB will expose some related information. Also check for
SELINUX_ERR (grep -i SELINUX_ERR /var/log/audit/audit.log)

Are the executables properly labelled?

> 
>> You should also specify file contexts for the pid files and lock files.
> 
> system_u:system_r:hadoop_datanode_initrc_t:s0 0 S 489 3125 1  1 80 0 - 579640 futex_ ?   00:00:02 java
> system_u:system_r:hadoop_namenode_initrc_t:s0 0 S 489 3376 1  2 80 0 - 581189 futex_ ?   00:00:02 java
> system_u:system_r:zookeeper_server_t:s0 0 S 488 3598 1  0 80   0 - 496167 futex_ ?       00:00:00 java
> 
> -rw-r--r--. hadoop hadoop system_u:object_r:hadoop_datanode_var_run_t:s0 hadoop-hadoop-datanode.pid
> -rw-r--r--. hadoop hadoop system_u:object_r:hadoop_namenode_var_run_t:s0 hadoop-hadoop-namenode.pid

>
> -rw-r--r--. root root system_u:object_r:hadoop_datanode_initrc_lock_t:s0 /var/lock/subsys/hadoop-datanode
> -rw-r--r--. root root system_u:object_r:hadoop_namenode_initrc_lock_t:s0 /var/lock/subsys/hadoop-namenode
> 
>>>
>>> #============= zookeeper_server_t ==============
>>> allow zookeeper_server_t java_exec_t:file { read getattr open execute execute_no_trans };
>>> allow zookeeper_server_t net_conf_t:file { read getattr open };
>>> allow zookeeper_server_t port_t:tcp_socket { name_bind name_connect };
>>
>> What port is it connecting and binding sockets to? Why are they not
>> labelled?
> 
> I left out the networking since I built it as a module.  I haven't had luck running refpolicy on Fedora.  The corenet_* functions might need to be written if refpolicy doesn't want all the ports permanently defined.

You could patch fedoras selinux-policy rpm that is what i usually do.
Anyways i will just assume its the ports we declared.
> 
>>> allow zookeeper_server_t self:process execmem;
>>> allow zookeeper_server_t self:tcp_socket { setopt read bind create accept write getattr connect shutdown listen };
>>>
>>
>> I will add the above rules to the policy that i have, except for the
>> bind/connect to generic port types as this seems like a bad idea to me.
> 
> I think I left out binding to generic ports in my policy.
> 
>> Were there no denials left for the zookeeper client? Did you use
>> zookeeper_run_client() to transition to the zookeeper_t domain?
> 
> zookeeper_client transitioned to the unconfined_java_t domain so there were no denials. I ran your patched policy without any modifications.  
> 

Because you probably ran it in the unconfined domain. You should use the
zookeeper_run_client() that my patch provides, so that you can
transition to the confined domain.

I will add file context specifications for the locks and pids you have
reported.

>>>> Some properties of this policy:
>>>>
>>>> The hadoop init script domains must be started by the system, or by unconfined or sysadm_t by using run_init server <hadoop service>
>>>> To use the zookeeper client domain, the zookeeper_run_client domain must be called for a domain. (for example if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)
>>>> The zookeeper server seems to be an ordinary init daemon domain.
>>>> Since i do not know what kind of dommain hadoop_t is, it is currently pretty much unreachable. I have created an hadoop_domtrans interface that can be called but currently no role is allowed the hadoop_t domain.
>>>>
>>>> Signed-off-by: Dominick Grift <domg472@gmail.com>
>>>> _______________________________________________
>>>> refpolicy mailing list
>>>> refpolicy at oss.tresys.com
>>>> http://oss.tresys.com/mailman/listinfo/refpolicy
>>
>>
>>
>>
>> _______________________________________________
>> refpolicy mailing list
>> refpolicy at oss.tresys.com
>> http://oss.tresys.com/mailman/listinfo/refpolicy
> 


-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 261 bytes
Desc: OpenPGP digital signature
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100921/e6e993c9/attachment.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21 16:14   ` Dominick Grift
@ 2010-09-21 16:34     ` Paul Nuzzi
  2010-09-21 17:08       ` Dominick Grift
  2010-09-21 19:55       ` Jeremy Solt
  0 siblings, 2 replies; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-21 16:34 UTC (permalink / raw)
  To: refpolicy

On 09/21/2010 12:14 PM, Dominick Grift wrote:
> On 09/21/2010 05:42 PM, Paul Nuzzi wrote:
>> On 09/21/2010 05:02 AM, Dominick Grift wrote:
>>> Well I've rewritten the policy as much as I can with the information that I currently have.
>>> Because of the use of the hadoop domain attributes i cannot determine whether it is the initrc script doing something or the application, and so i cannot currently finish the hadoop_domain_template policy.
>>
>> The hadoop_domain policy is basic stuff that most programs share, plus a few hadoop specific things.  I initially had separate functions for initrc and hadoop type policy.
>> Since we are not exporting hadoop specific functionality to other modules I removed them from the .if file. 
> 
> With that in mind, it looks like the policy has some duplicate rules.
> 
>>> Also i have no clue what transitions to the hadoop_t domain. It does not own an initrc script so i gather it is no init daemon domain. Must be an application domain then?
>>> A lot of other things that arent, clear and/ or make no sense.
>>> I have also left out things that i think, should be handled differently.
>>
>> hadoop_t is for the hadoop executable which is /usr/bin/hadoop.  It does basic file system stuff, submits jobs and administers the cluster.
> 
> And who or what runs it? Who or what transitions to the hadoop_t domain?

All of the users run it.  Users and the sysadm need to transition to the hadoop_t domain.
 
>>> It would be cool if someone could test this policy and provide feedback in the shape of avc denials.
>>
>> I was able to get zookeeper server and client to run.  Here is the audit2allow in permissive mode.  Ignore the networking avcs.  I didn't port the networking functions since it was built as a module.
>> Zookeeper client doesn't domtrans into a domain.  There is an semodule insert error. hadoop_tasktracker_data_t needs to be modified.
> 
> Thanks i fixed that file context specification now.
> 
> Were you able to run the init script domains in permissive mode? Does it
> work when you use run_init? Do the initrc domains properly transition to
> the main domains in permissive mode?
 
None of the pseudo initrc domains transitioned to the target domain using run_init.  

> Could you provide some avc denials of that?

There doesn't seem to be any denials for the domtrans.

> You should also specify file contexts for the pid files and lock files.

system_u:system_r:hadoop_datanode_initrc_t:s0 0 S 489 3125 1  1 80 0 - 579640 futex_ ?   00:00:02 java
system_u:system_r:hadoop_namenode_initrc_t:s0 0 S 489 3376 1  2 80 0 - 581189 futex_ ?   00:00:02 java
system_u:system_r:zookeeper_server_t:s0 0 S 488 3598 1  0 80   0 - 496167 futex_ ?       00:00:00 java

-rw-r--r--. hadoop hadoop system_u:object_r:hadoop_datanode_var_run_t:s0 hadoop-hadoop-datanode.pid
-rw-r--r--. hadoop hadoop system_u:object_r:hadoop_namenode_var_run_t:s0 hadoop-hadoop-namenode.pid

-rw-r--r--. root root system_u:object_r:hadoop_datanode_initrc_lock_t:s0 /var/lock/subsys/hadoop-datanode
-rw-r--r--. root root system_u:object_r:hadoop_namenode_initrc_lock_t:s0 /var/lock/subsys/hadoop-namenode

>>
>> #============= zookeeper_server_t ==============
>> allow zookeeper_server_t java_exec_t:file { read getattr open execute execute_no_trans };
>> allow zookeeper_server_t net_conf_t:file { read getattr open };
>> allow zookeeper_server_t port_t:tcp_socket { name_bind name_connect };
> 
> What port is it connecting and binding sockets to? Why are they not
> labelled?

I left out the networking since I built it as a module.  I haven't had luck running refpolicy on Fedora.  The corenet_* functions might need to be written if refpolicy doesn't want all the ports permanently defined.

>> allow zookeeper_server_t self:process execmem;
>> allow zookeeper_server_t self:tcp_socket { setopt read bind create accept write getattr connect shutdown listen };
>>
> 
> I will add the above rules to the policy that i have, except for the
> bind/connect to generic port types as this seems like a bad idea to me.

I think I left out binding to generic ports in my policy.

> Were there no denials left for the zookeeper client? Did you use
> zookeeper_run_client() to transition to the zookeeper_t domain?

zookeeper_client transitioned to the unconfined_java_t domain so there were no denials. I ran your patched policy without any modifications.  

>>> Some properties of this policy:
>>>
>>> The hadoop init script domains must be started by the system, or by unconfined or sysadm_t by using run_init server <hadoop service>
>>> To use the zookeeper client domain, the zookeeper_run_client domain must be called for a domain. (for example if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)
>>> The zookeeper server seems to be an ordinary init daemon domain.
>>> Since i do not know what kind of dommain hadoop_t is, it is currently pretty much unreachable. I have created an hadoop_domtrans interface that can be called but currently no role is allowed the hadoop_t domain.
>>>
>>> Signed-off-by: Dominick Grift <domg472@gmail.com>
>>> _______________________________________________
>>> refpolicy mailing list
>>> refpolicy at oss.tresys.com
>>> http://oss.tresys.com/mailman/listinfo/refpolicy
> 
> 
> 
> 
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-21 16:29 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-21 16:29 UTC (permalink / raw)
  To: refpolicy

Here is a new version with most of the feedback applied.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... 896ceda... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 2a5fe66... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   35 +++
 policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
 4 files changed, 633 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..896ceda
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,35 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?		gen_context(system_u:object_r:hadoop_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..2a5fe66
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,247 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+	domain_transition_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	corecmd_exec_all_executables(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..53a242b
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,347 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+# What or who runs this?
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+# Self permissions: process signalling, pipes, and TCP/UDP sockets
+# for talking to zookeeper servers.
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+# Read zookeeper configuration (/etc/zookeeper*).
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+# Append-style logging into /var/log/zookeeper; new log files
+# created in generic log dirs are labelled zookeeper_log_t.
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+# Private temp files in /tmp.
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+# Existence checks against a local zookeeper server process.
+allow zookeeper_t zookeeper_server_t:process signull;
+
+# The client wrapper script re-executes itself.
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+# Network access: connect to the zookeeper client port (tcp/2181).
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+# Interactive client: runs on a user terminal.
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+# Self permissions. execmem was observed in testing under the JVM
+# (see the audit2allow output earlier in this thread).
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+# Read zookeeper configuration.
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+# Data store (/var/zookeeper); content created under /var/lib is
+# labelled zookeeper_server_var_t.
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+# Append-style logging, shared log type with the client.
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+# PID file in /var/run.
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+# The server wrapper script re-executes itself.
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+# Network access: binds the client port (tcp/2181) and, for quorum
+# members, binds/connects the election (tcp/3888) and leader
+# (tcp/2888) ports.
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_server_t)
+')
-- 
1.7.2.3

-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100921/3e9abc06/attachment-0001.bin 

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21 15:42 ` Paul Nuzzi
@ 2010-09-21 16:14   ` Dominick Grift
  2010-09-21 16:34     ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-21 16:14 UTC (permalink / raw)
  To: refpolicy

On 09/21/2010 05:42 PM, Paul Nuzzi wrote:
> On 09/21/2010 05:02 AM, Dominick Grift wrote:
>> Well ive rewritten the policy as much as i ca with the information that i currently have.
>> Because of the use of the hadoop domain attributes i cannot determine whether it is the initrc script doing something or the application, and so i cannot currently finish the hadoop_domain_template policy.
> 
> The hadoop_domain policy is basic stuff that most programs share, plus a few hadoop specific things.  I initially had separate functions for initrc and hadoop type policy.
> Since we are not exporting hadoop specific functionality to other modules I removed them from the .if file. 

With that in mind, it looks like the policy has some duplicate rules.

>> Also i have no clue what transitions to the hadoop_t domain. It does not own an initrc script so i gather it is no init daemon domain. Must be an application domain then?
>> A lot of other things that arent, clear and/ or make no sense.
>> I have also left out things that i think, should be handled differently.
> 
> hadoop_t is for the hadoop executable which is /usr/bin/hadoop.  It does basic file system stuff, submits jobs and administers the cluster.

And who or what runs it? Who or what transitions to the hadoop_t domain?

>> It would be cool if someone could test this policy and provide feedback in the shape of avc denials.
> 
> I was able to get zookeeper server and client to run.  Here is the audit2allow in permissive mode.  Ignore the networking avcs.  I didn't port the networking functions since it was built as a module.
> Zookeeper client doesn't domtrans into a domain.  There is an semodule insert error. hadoop_tasktracker_data_t needs to be modified.

Thanks i fixed that file context specification now.

Were you able to run the init script domains in permissive mode? Does it
work when you use run_init? Do the initrc domains properly transition to
the main domains in permissive mode?

Could you provides some avc denials of that?

You should also specify file contexts for the pid files and lock files.

> 
> #============= zookeeper_server_t ==============
> allow zookeeper_server_t java_exec_t:file { read getattr open execute execute_no_trans };
> allow zookeeper_server_t net_conf_t:file { read getattr open };
> allow zookeeper_server_t port_t:tcp_socket { name_bind name_connect };

What port is it connecting and binding sockets to? Why are they not
labelled?

> allow zookeeper_server_t self:process execmem;
> allow zookeeper_server_t self:tcp_socket { setopt read bind create accept write getattr connect shutdown listen };
> 

I will add the above rules to the policy that i have, except for the
bind/connect to generic port types as this seems like a bad idea to me.

Were there no denials left for the zookeeper client? Did you use
zookeeper_run_client() to transition to the zookeeper_t domain?

>> Some properties of this policy:
>>
>> The hadoop init script domains must be started by the system, or by unconfined or sysadm_t by using run_init server <hadoop service>
>> To use the zookeeper client domain, the zookeeper_run_client domain must be called for a domain. (for example if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)
>> The zookeeper server seems to be an ordinary init daemon domain.
>> Since i do not know what kind of dommain hadoop_t is, it is currently pretty much unreachable. I have created an hadoop_domtrans interface that can be called but currently no role is allowed the hadoop_t domain.
>>
>> Signed-off-by: Dominick Grift <domg472@gmail.com>
>> _______________________________________________
>> refpolicy mailing list
>> refpolicy at oss.tresys.com
>> http://oss.tresys.com/mailman/listinfo/refpolicy


-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 261 bytes
Desc: OpenPGP digital signature
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100921/d41757b3/attachment.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-21  9:02 Dominick Grift
@ 2010-09-21 15:42 ` Paul Nuzzi
  2010-09-21 16:14   ` Dominick Grift
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-21 15:42 UTC (permalink / raw)
  To: refpolicy

On 09/21/2010 05:02 AM, Dominick Grift wrote:
> Well ive rewritten the policy as much as i ca with the information that i currently have.
> Because of the use of the hadoop domain attributes i cannot determine whether it is the initrc script doing something or the application, and so i cannot currently finish the hadoop_domain_template policy.

The hadoop_domain policy is basic stuff that most programs share, plus a few hadoop specific things.  I initially had separate functions for initrc and hadoop type policy.
Since we are not exporting hadoop specific functionality to other modules I removed them from the .if file. 

> Also i have no clue what transitions to the hadoop_t domain. It does not own an initrc script so i gather it is no init daemon domain. Must be an application domain then?
> A lot of other things that arent, clear and/ or make no sense.
> I have also left out things that i think, should be handled differently.

hadoop_t is for the hadoop executable which is /usr/bin/hadoop.  It does basic file system stuff, submits jobs and administers the cluster.

> It would be cool if someone could test this policy and provide feedback in the shape of avc denials.

I was able to get zookeeper server and client to run.  Here is the audit2allow in permissive mode.  Ignore the networking avcs.  I didn't port the networking functions since it was built as a module.
Zookeeper client doesn't domtrans into a domain.  There is an semodule insert error. hadoop_tasktracker_data_t needs to be modified.

#============= zookeeper_server_t ==============
allow zookeeper_server_t java_exec_t:file { read getattr open execute execute_no_trans };
allow zookeeper_server_t net_conf_t:file { read getattr open };
allow zookeeper_server_t port_t:tcp_socket { name_bind name_connect };
allow zookeeper_server_t self:process execmem;
allow zookeeper_server_t self:tcp_socket { setopt read bind create accept write getattr connect shutdown listen };

> Some properties of this policy:
> 
> The hadoop init script domains must be started by the system, or by unconfined or sysadm_t by using run_init server <hadoop service>
> To use the zookeeper client domain, the zookeeper_run_client domain must be called for a domain. (for example if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)
> The zookeeper server seems to be an ordinary init daemon domain.
> Since i do not know what kind of dommain hadoop_t is, it is currently pretty much unreachable. I have created an hadoop_domtrans interface that can be called but currently no role is allowed the hadoop_t domain.
> 
> Signed-off-by: Dominick Grift <domg472@gmail.com>
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-21  9:02 Dominick Grift
  2010-09-21 15:42 ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-21  9:02 UTC (permalink / raw)
  To: refpolicy

Well, I've rewritten the policy as much as I can with the information that I currently have.
Because of the use of the hadoop domain attributes i cannot determine whether it is the initrc script doing something or the application, and so i cannot currently finish the hadoop_domain_template policy.
Also, I have no clue what transitions to the hadoop_t domain. It does not own an initrc script, so I gather it is not an init daemon domain. Must be an application domain then?
A lot of other things aren't clear and/or make no sense.
I have also left out things that I think should be handled differently.

It would be cool if someone could test this policy and provide feedback in the shape of avc denials.

Some properties of this policy:

The hadoop init script domains must be started by the system, or by unconfined or sysadm_t by using run_init server <hadoop service>
To use the zookeeper client domain, the zookeeper_run_client domain must be called for a domain. (for example if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)
The zookeeper server seems to be an ordinary init daemon domain.
Since I do not know what kind of domain hadoop_t is, it is currently pretty much unreachable. I have created a hadoop_domtrans interface that can be called, but currently no role is allowed the hadoop_t domain.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... bce5d29... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 462d851... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 880f09a... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   34 +++
 policy/modules/services/hadoop.if       |  294 +++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  339 +++++++++++++++++++++++++++++++
 4 files changed, 671 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..bce5d29
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,34 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_data_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?		gen_context(system_u:object_r:hadoop_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..462d851
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,294 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+	# Declares the daemon domain hadoop_$1_t, a private init script
+	# domain hadoop_$1_initrc_t, and per-service log/lib/pid/tmp types.
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+
+	hadoop_exec_entry_type(hadoop_$1_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive declarations apply to domains, not exec types,
+	# so the initrc placeholder names the initrc domain.
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	# Service lock file management.
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	# Log files created in /var/log/hadoop* get the per-service type.
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	# PID files created in /var/run/hadoop* get the per-service type.
+	allow hadoop_$1_initrc_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	# The init script runs the hadoop binary in the service domain.
+	hadoop_spec_domtrans(hadoop_$1_initrc_t, hadoop_$1_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	corecmd_exec_all_executables(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5.
+	# These were previously (incorrectly) applied to hadoop_$1_t here,
+	# duplicating the identical rules in the shared domain policy below;
+	# the initrc domain is the one that needs them in this section.
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	# Hadoop services existence-check their peers.
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		# hadoop_exec_t is used by domtrans_pattern below and must be
+		# required; previously hadoop_t was listed twice instead.
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	# The hadoop wrapper lives under /usr/lib/hadoop*/bin.
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Make hadoop executable files an
+##	entrypoint for the specified domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	The domain for which hadoop_exec_t
+##	is an entrypoint.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_entry_type',`
+	gen_require(`
+		type hadoop_exec_t;
+	')
+
+	# Let the hadoop binary serve as an entry point into $1; used by
+	# hadoop_domain_template for each per-service domain.
+	domain_entry_file($1, hadoop_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	# Authorize the caller role for the hadoop domain.
+	role $2 types hadoop_t;
+
+	# Let the caller signal, trace, and inspect its hadoop children.
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop executable files
+##	in the specified domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="target_domain">
+##	<summary>
+##	Domain to transition to.
+##	</summary>
+## </param>
+#
+interface(`hadoop_spec_domtrans',`
+	gen_require(`
+		type hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	# Unlike hadoop_domtrans, the target domain ($2) is caller-chosen;
+	# hadoop_domain_template uses this so each init script domain
+	# transitions to its own per-service domain.
+	domain_transition_pattern($1, hadoop_exec_t, $2)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	# /usr/bin/zookeeper-client is labelled zookeeper_exec_t.
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	# /usr/bin/zookeeper-server is labelled zookeeper_server_exec_t.
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	# Run the hadoop-zookeeper init script in the initrc domain.
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	# Authorize the caller role for the zookeeper client domain.
+	role $2 types zookeeper_t;
+
+	# Let the caller signal, trace, and inspect its client children.
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..880f09a
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,339 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+# Attribute shared by all per-service hadoop domains (see
+# hadoop_domain_template in hadoop.if).
+attribute hadoop_domain;
+
+# What or who runs this?
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# Uncomment while developing/testing in permissive mode.
+# permissive hadoop_t;
+
+# One domain per hadoop service.
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+# Log type shared by the client and server domains.
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+# Data store, /var/zookeeper.
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+# Self permissions for the /usr/bin/hadoop command-line tool.
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+# Configuration under /etc/hadoop*; some config content is
+# sourced/executed (shell env scripts), hence can_exec.
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+# Existence checks against the hadoop service domains.
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+# Network access: connects to the namenode (tcp/8020) and portmap
+# ports. NOTE(review): the zope port (tcp/8021) appears borrowed
+# here for a hadoop service — confirm which daemon listens on 8021.
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+# NOTE(review): files_read_all_files is very broad for a user tool;
+# consider narrowing once the needed file types are known.
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+# Self permissions: process signalling, pipes, and TCP/UDP sockets.
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+# Read zookeeper configuration.
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+# Append-style logging into /var/log/zookeeper.
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+# Existence checks against a local zookeeper server process.
+allow zookeeper_t zookeeper_server_t:process signull;
+
+# The client wrapper script re-executes itself.
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+# Network access: connect to the zookeeper client port (tcp/2181).
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+# Interactive client: runs on a user terminal.
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
-- 
1.7.2.3

-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100921/58913164/attachment.bin 

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-20 22:24 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-20 22:24 UTC (permalink / raw)
  To: refpolicy

I made a start on my take on this policy. This is what i have so far.

I still have a lot of questions though.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... 3ae8107... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 7e43690... A	policy/modules/services/hadoop.if
:000000 100644 0000000... b132803... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   35 ++++
 policy/modules/services/hadoop.if       |  332 +++++++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  219 ++++++++++++++++++++
 4 files changed, 590 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..3ae8107
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,35 @@
+/etc/hadoop(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+/etc/hadoop-0.20(/.*)?		gen_context(system_u:object_r:hadoop_etc_t,s0)
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server__t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_data_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?		gen_context(system_u:object_r:hadoop_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..7e43690
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,332 @@
+## <summary>The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing.</summary>
+## <desc>
+##	<p>
+##		Hadoop Common: The common utilities that support the
+##		other Hadoop subprojects.
+##	</p>
+##	<p>
+##		Chukwa: A data collection system for managing large
+##		distributed systems.
+##	</p>
+##	<p>
+##		HBase: A scalable, distributed database that supports
+##		structured data storage for large tables.
+##	</p>
+##	<p>
+##		HDFS: A distributed file system that provides high
+##		throughput access to application data.
+##	</p>
+##	<p>
+##		Hive: A data warehouse infrastructure that provides
+##		data summarization and ad hoc querying.
+##	</p>
+##	<p>
+##		MapReduce: A software framework for distributed
+##		processing of large data sets on compute clusters.
+##	</p>
+##	<p>
+##		Pig: A high-level data-flow language and execution
+##		framework for parallel computation.
+##	</p>
+##	<p>
+##		ZooKeeper: A high-performance coordination service for
+##		distributed applications.
+##	</p>
+## </desc>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <desc>
+##	<p>
+##	This template creates a domain to be used for
+##	a new hadoop daemon.
+##	</p>
+## </desc>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain, hadoop_initrc_domain, hadoop_lib_file;
+		attribute hadoop_pid_file, hadoop_log_file, hadoop_tmp_file;
+		type hadoop_var_lib_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+
+	type hadoop_$1_initrc_t, hadoop_initrc_domain;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_datanode_$1_exec_t)
+	role system_r types hadoop_$1_initrc_t;
+
+	type hadoop_$1_var_lib_t, hadoop_lib_file;
+	files_type(hadoop_$1_var_lib_t)
+
+	type hadoop_$1_log_t, hadoop_log_file;
+	logging_log_file(hadoop_$1_log_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t, hadoop_pid_file;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t, hadoop_tmp_file;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	####################################
+	#
+	# Shared policy.
+	#
+
+	allow hadoop_domain self:process signull;
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute the hadoop executable file
+##	in the specified domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="target_domain">
+##	<summary>
+##	Domain to transition to.
+##	</summary>
+## </param>
+#
+interface(`hadoop_spec_domtrans',`
+	gen_require(`
+		type hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib(
+	domain_transition_pattern($1, hadoop_exec_t, $2)
+')
+
+########################################
+## <summary>
+##	Create objects in hadoop lib
+##	directories with a private type.
+## </summary>
+## <desc>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_var_lib_filetrans',`
+	gen_require(`
+		type hadoop_var_lib_t;
+	')
+
+	files_search_var_lib($1)
+	filetrans_pattern($1, hadoop_var_lib_t, $2, $3)
+')
+
+########################################
+## <summary>
+##	Create objects in hadoop log
+##	directories with a private type.
+## </summary>
+## <desc>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_log_filetrans',`
+	gen_require(`
+		type hadoop_log_t;
+	')
+
+	logging_search_logs($1)
+	filetrans_pattern($1, hadoop_log_t, $2, $3)
+')
+
+########################################
+## <summary>
+##	Create objects in hadoop pid
+##	directories with a private type.
+## </summary>
+## <desc>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_pid_filetrans',`
+	gen_require(`
+		type hadoop_var_run_t;
+	')
+
+	files_search_pids($1)
+	filetrans_pattern($1, hadoop_var_run_t, $2, $3)
+')
+
+########################################
+## <summary>
+##	Create objects in hadoop temporary
+##	directories with a private type.
+## </summary>
+## <desc>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_tmp_filetrans',`
+	gen_require(`
+		type hadoop_tmp_t;
+	')
+
+	files_search_tmp($1)
+	filetrans_pattern($1, hadoop_tmp_t, $2, $3)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..b132803
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,219 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Global declarations.
+#
+
+attribute hadoop_domain;
+attribute hadoop_initrc_domain;
+attribute hadoop_log_file;
+attribute hadoop_pid_file;
+attribute hadoop_lib_file;
+attribute hadoop_tmp_file;
+
+########################################
+#
+# Hadoop declarations.
+#
+
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+########################################
+#
+# Hadoop datanode declarations.
+#
+
+hadoop_domain_template(datanode)
+
+########################################
+#
+# Hadoop jobtracker declarations.
+#
+
+hadoop_domain_template(jobtracker)
+
+########################################
+#
+# Hadoop namenode declarations.
+#
+
+hadoop_domain_template(namenode)
+
+########################################
+#
+# Hadoop secondary namenode declarations.
+#
+
+hadoop_domain_template(secondarynamenode)
+
+########################################
+#
+# Hadoop tasktracker declarations.
+#
+
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Global policy.
+#
+
+########################################
+#
+# Hadoop policy.
+#
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { getsched execmem sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_lib_t, zookeeper_server_var_lib_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_lib_t, zookeeper_server_var_lib_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_lib_t, dir)
+
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
-- 
1.7.2.3

-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100921/86d21075/attachment-0001.bin 

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-20 19:33     ` Dominick Grift
@ 2010-09-20 19:50       ` Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-20 19:50 UTC (permalink / raw)
  To: refpolicy

On 09/20/2010 09:33 PM, Dominick Grift wrote:
> On Mon, Sep 20, 2010 at 02:02:12PM -0400, Paul Nuzzi wrote:
>> On 09/20/2010 01:03 PM, Dominick Grift wrote:
>>> On Mon, Sep 20, 2010 at 10:34:28AM -0400, Paul Nuzzi wrote:
>>>> I fixed the hadoop patch based on all of the feedback I received.  Added role support for sysadm_r to all of the services and programs.  Steve and I were not able to successfully use init_script_domain.  The interface didn't provide what we needed so I had to patch unconfined.if with a role transition interface.  It was also causing problems with sysadm_r.  I split up the patches since it was huge. 
>>>
>>> Why did the init script domain not work for you?
>>>
>>> I am interested in helping to make this policy upstreamable but i am not sure about how to deal with this init scenario and i would like to hear from others what the best way is to go forward with this.
>>>
>>
>> I wasn't able to transfer into the pseudo initrc domain with init_script_domain.  Using
>> init_script_domain(hadoop_datanode_initrc_t, hadoop_datanode_initrc_exec_t) executed the startup script in unconfined_u:system_r:initrc_t instead of :hadoop_datanode_initrc_t.  Using init_daemon_domain (which I know works) and init_script_domain together gives a semodule insert error conflicting te rule for (init_t, hadoop_datanode_initrc_exec_t:process): old was initrc_t, new is hadoop_datanode_initrc_t.  Maybe this is because it contains domtrans_pattern(init_run_all_scripts_domain, $2, $1) instead of domtrans_pattern(initrc_t,$2,$1) that init_daemon_domain has.
> 
> I just test it and it works provided that you use run_init to start the daemon.
> 
> I suspect Fedora broken the functionality to make it work by default:
> 
> These seem to be the culprits:
> 
> init_exec_script_files(sysadm_t)
> init_domtrans_script(unconfined_t)
> 
> Here is how to reproduce how i got it to work:
> 
> policy_module(test, 1.0.0)
> 
> type test_t;
> type test_exec_t;
> init_script_domain(test_t, test_exec_t)
> role system_r types test_t;
> 
> chcon -t test_exec_t /etc/rc.d/init.d/httpd
> 
> sudo -r sysadm_r -t sysadm_t
> run_init service httpd start
> 
> sudo -r unconfined_r -t unconfined_t
> run_init service httpd start
> 
> 

The problem i think is that redhats policy diverged from refpolicy,
especially with regard to this functionality.

This makes it that much harder to develop policy on redhat
configurations that should get adopted in refpolicy.

The use of the init script domain() will probably work just fine in
refpolicy, and so if you want your policy upstreamed you should probably
use that.

Redhat will have to deal with it once it merges refpolicy into its
branch (or they just exclude it).

> 
>>
>> Searching through refpolicy I don't see any references to init_script_domain.  Lets see what everyone else thinks.


-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 261 bytes
Desc: OpenPGP digital signature
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100920/e96a3a31/attachment.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-20 18:02   ` Paul Nuzzi
@ 2010-09-20 19:33     ` Dominick Grift
  2010-09-20 19:50       ` Dominick Grift
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-20 19:33 UTC (permalink / raw)
  To: refpolicy

On Mon, Sep 20, 2010 at 02:02:12PM -0400, Paul Nuzzi wrote:
> On 09/20/2010 01:03 PM, Dominick Grift wrote:
> > On Mon, Sep 20, 2010 at 10:34:28AM -0400, Paul Nuzzi wrote:
> >> I fixed the hadoop patch based on all of the feedback I received.  Added role support for sysadm_r to all of the services and programs.  Steve and I were not able to successfully use init_script_domain.  The interface didn't provide what we needed so I had to patch unconfined.if with a role transition interface.  It was also causing problems with sysadm_r.  I split up the patches since it was huge. 
> > 
> > Why did the init script domain not work for you?
> > 
> > I am interested in helping to make this policy upstreamable but i am not sure about how to deal with this init scenario and i would like to hear from others what the best way is to go forward with this.
> > 
> 
> I wasn't able to transfer into the pseudo initrc domain with init_script_domain.  Using
> init_script_domain(hadoop_datanode_initrc_t, hadoop_datanode_initrc_exec_t) executed the startup script in unconfined_u:system_r:initrc_t instead of :hadoop_datanode_initrc_t.  Using init_daemon_domain (which I know works) and init_script_domain together gives a semodule insert error conflicting te rule for (init_t, hadoop_datanode_initrc_exec_t:process): old was initrc_t, new is hadoop_datanode_initrc_t.  Maybe this is because it contains domtrans_pattern(init_run_all_scripts_domain, $2, $1) instead of domtrans_pattern(initrc_t,$2,$1) that init_daemon_domain has.

I just test it and it works provided that you use run_init to start the daemon.

I suspect Fedora broken the functionality to make it work by default:

These seem to be the culprits:

init_exec_script_files(sysadm_t)
init_domtrans_script(unconfined_t)

Here is how to reproduce how i got it to work:

policy_module(test, 1.0.0)

type test_t;
type test_exec_t;
init_script_domain(test_t, test_exec_t)
role system_r types test_t;

chcon -t test_exec_t /etc/rc.d/init.d/httpd

sudo -r sysadm_r -t sysadm_t
run_init service httpd start

sudo -r unconfined_r -t unconfined_t
run_init service httpd start




> 
> Searching through refpolicy I don't see any references to init_script_domain.  Lets see what everyone else thinks.
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100920/2e8bf76c/attachment.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-20 14:34 Paul Nuzzi
  2010-09-20 17:03 ` Dominick Grift
@ 2010-09-20 19:01 ` Dominick Grift
  1 sibling, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-20 19:01 UTC (permalink / raw)
  To: refpolicy

On Mon, Sep 20, 2010 at 10:34:28AM -0400, Paul Nuzzi wrote:
> I fixed the hadoop patch based on all of the feedback I received.  Added role support for sysadm_r to all of the services and programs.  Steve and I were not able to successfully use init_script_domain.  The interface didn't provide what we needed so I had to patch unconfined.if with a role transition interface.  It was also causing problems with sysadm_r.  I split up the patches since it was huge. 
> 
> Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

I do not think it is a good idea to run these services with the unconfined_r (or sysadm_r) roles instead try:

init_script_role_transition()

> 
> ---
>  policy/modules/system/unconfined.if |   25 +++++++++++++++++++++++++
>  1 file changed, 25 insertions(+)
> 
> diff --git a/policy/modules/system/unconfined.if b/policy/modules/system/unconfined.if
> index 416e668..3364eb3 100644
> --- a/policy/modules/system/unconfined.if
> +++ b/policy/modules/system/unconfined.if
> @@ -279,6 +279,31 @@ interface(`unconfined_domtrans_to',`
>  
>  ########################################
>  ## <summary>
> +##	Allow a program to enter the specified domain through the
> +## 	unconfined role.
> +## </summary>
> +## <desc>
> +##	<p>
> +##	Allow unconfined role to execute the specified program in
> +##	the specified domain.
> +##	</p>
> +## </desc>
> +## <param name="domain">
> +##	<summary>
> +##	Domain to execute in.
> +##	</summary>
> +## </param>
> +#
> +interface(`unconfined_roletrans',`
> +	gen_require(`
> +		role unconfined_r;
> +	')
> +
> +	role unconfined_r types $1;
> +')
> +
> +########################################
> +## <summary>
>  ##	Allow unconfined to execute the specified program in
>  ##	the specified domain.  Allow the specified domain the
>  ##	unconfined role and use of unconfined user terminals.
> 
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100920/2afb0e85/attachment.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-20 17:03 ` Dominick Grift
@ 2010-09-20 18:02   ` Paul Nuzzi
  2010-09-20 19:33     ` Dominick Grift
  0 siblings, 1 reply; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-20 18:02 UTC (permalink / raw)
  To: refpolicy

On 09/20/2010 01:03 PM, Dominick Grift wrote:
> On Mon, Sep 20, 2010 at 10:34:28AM -0400, Paul Nuzzi wrote:
>> I fixed the hadoop patch based on all of the feedback I received.  Added role support for sysadm_r to all of the services and programs.  Steve and I were not able to successfully use init_script_domain.  The interface didn't provide what we needed so I had to patch unconfined.if with a role transition interface.  It was also causing problems with sysadm_r.  I split up the patches since it was huge. 
> 
> Why did the init script domain not work for you?
> 
> I am interested in helping to make this policy upstreamable but I am not sure about how to deal with this init scenario and I would like to hear from others what the best way is to go forward with this.
> 

I wasn't able to transfer into the pseudo initrc domain with init_script_domain.  Using
init_script_domain(hadoop_datanode_initrc_t, hadoop_datanode_initrc_exec_t) executed the startup script in unconfined_u:system_r:initrc_t instead of :hadoop_datanode_initrc_t.  Using init_daemon_domain (which I know works) and init_script_domain together gives a semodule insert error conflicting te rule for (init_t, hadoop_datanode_initrc_exec_t:process): old was initrc_t, new is hadoop_datanode_initrc_t.  Maybe this is because it contains domtrans_pattern(init_run_all_scripts_domain, $2, $1) instead of domtrans_pattern(initrc_t,$2,$1) that init_daemon_domain has.

Searching through refpolicy I don't see any references to init_script_domain.  Let's see what everyone else thinks.

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
  2010-09-20 14:34 Paul Nuzzi
@ 2010-09-20 17:03 ` Dominick Grift
  2010-09-20 18:02   ` Paul Nuzzi
  2010-09-20 19:01 ` Dominick Grift
  1 sibling, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-20 17:03 UTC (permalink / raw)
  To: refpolicy

On Mon, Sep 20, 2010 at 10:34:28AM -0400, Paul Nuzzi wrote:
> I fixed the hadoop patch based on all of the feedback I received.  Added role support for sysadm_r to all of the services and programs.  Steve and I were not able to successfully use init_script_domain.  The interface didn't provide what we needed so I had to patch unconfined.if with a role transition interface.  It was also causing problems with sysadm_r.  I split up the patches since it was huge. 

Why did the init script domain not work for you?

I am interested in helping to make this policy upstreamable but I am not sure about how to deal with this init scenario and I would like to hear from others what the best way is to go forward with this.

> 
> Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>
> 
> ---
>  policy/modules/system/unconfined.if |   25 +++++++++++++++++++++++++
>  1 file changed, 25 insertions(+)
> 
> diff --git a/policy/modules/system/unconfined.if b/policy/modules/system/unconfined.if
> index 416e668..3364eb3 100644
> --- a/policy/modules/system/unconfined.if
> +++ b/policy/modules/system/unconfined.if
> @@ -279,6 +279,31 @@ interface(`unconfined_domtrans_to',`
>  
>  ########################################
>  ## <summary>
> +##	Allow a program to enter the specified domain through the
> +## 	unconfined role.
> +## </summary>
> +## <desc>
> +##	<p>
> +##	Allow unconfined role to execute the specified program in
> +##	the specified domain.
> +##	</p>
> +## </desc>
> +## <param name="domain">
> +##	<summary>
> +##	Domain to execute in.
> +##	</summary>
> +## </param>
> +#
> +interface(`unconfined_roletrans',`
> +	gen_require(`
> +		role unconfined_r;
> +	')
> +
> +	role unconfined_r types $1;
> +')
> +
> +########################################
> +## <summary>
>  ##	Allow unconfined to execute the specified program in
>  ##	the specified domain.  Allow the specified domain the
>  ##	unconfined role and use of unconfined user terminals.
> 
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100920/42b8d5ff/attachment.bin 

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-20 14:34 Paul Nuzzi
  2010-09-20 17:03 ` Dominick Grift
  2010-09-20 19:01 ` Dominick Grift
  0 siblings, 2 replies; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-20 14:34 UTC (permalink / raw)
  To: refpolicy

I fixed the hadoop patch based on all of the feedback I received.  Added role support for sysadm_r to all of the services and programs.  Steve and I were not able to successfully use init_script_domain.  The interface didn't provide what we needed so I had to patch unconfined.if with a role transition interface.  It was also causing problems with sysadm_r.  I split up the patches since it was huge. 

Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

---
 policy/modules/system/unconfined.if |   25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/policy/modules/system/unconfined.if b/policy/modules/system/unconfined.if
index 416e668..3364eb3 100644
--- a/policy/modules/system/unconfined.if
+++ b/policy/modules/system/unconfined.if
@@ -279,6 +279,31 @@ interface(`unconfined_domtrans_to',`
 
 ########################################
 ## <summary>
+##	Allow a program to enter the specified domain through the
+## 	unconfined role.
+## </summary>
+## <desc>
+##	<p>
+##	Allow unconfined role to execute the specified program in
+##	the specified domain.
+##	</p>
+## </desc>
+## <param name="domain">
+##	<summary>
+##	Domain to execute in.
+##	</summary>
+## </param>
+#
+interface(`unconfined_roletrans',`
+	gen_require(`
+		role unconfined_r;
+	')
+
+	role unconfined_r types $1;
+')
+
+########################################
+## <summary>
 ##	Allow unconfined to execute the specified program in
 ##	the specified domain.  Allow the specified domain the
 ##	unconfined role and use of unconfined user terminals.

^ permalink raw reply related	[flat|nested] 37+ messages in thread

end of thread, other threads:[~2010-10-07 16:35 UTC | newest]

Thread overview: 37+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-09-21 19:57 [refpolicy] [PATCH] hadoop 1/10 -- unconfined Dominick Grift
2010-09-21 20:04 ` Jeremy Solt
2010-09-23 13:13   ` Paul Nuzzi
2010-09-24 14:20     ` Jeremy Solt
2010-09-27 18:50       ` Paul Nuzzi
2010-09-30 19:39         ` Paul Nuzzi
2010-10-01 12:02           ` Dominick Grift
2010-10-01 15:17             ` Paul Nuzzi
2010-10-01 17:56               ` Christopher J. PeBenito
2010-10-04 17:15                 ` Paul Nuzzi
2010-10-04 18:18                   ` Christopher J. PeBenito
2010-10-05 19:59                     ` Paul Nuzzi
2010-10-07 14:41                       ` Chris PeBenito
2010-10-07 16:35                         ` Paul Nuzzi
2010-10-01 18:01               ` Dominick Grift
2010-10-01 19:06                 ` Paul Nuzzi
  -- strict thread matches above, loose matches on Subject: below --
2010-10-06 10:25 Dominick Grift
2010-10-06 15:54 ` Paul Nuzzi
2010-10-06 17:34   ` Dominick Grift
2010-10-06 10:06 Dominick Grift
2010-09-23 14:53 Dominick Grift
2010-09-21 16:29 Dominick Grift
2010-09-21  9:02 Dominick Grift
2010-09-21 15:42 ` Paul Nuzzi
2010-09-21 16:14   ` Dominick Grift
2010-09-21 16:34     ` Paul Nuzzi
2010-09-21 17:08       ` Dominick Grift
2010-09-23 13:54         ` Paul Nuzzi
2010-09-23 14:40           ` Dominick Grift
2010-09-21 19:55       ` Jeremy Solt
2010-09-20 22:24 Dominick Grift
2010-09-20 14:34 Paul Nuzzi
2010-09-20 17:03 ` Dominick Grift
2010-09-20 18:02   ` Paul Nuzzi
2010-09-20 19:33     ` Dominick Grift
2010-09-20 19:50       ` Dominick Grift
2010-09-20 19:01 ` Dominick Grift

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.