* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-21 19:57 Dominick Grift
  2010-09-21 20:04 ` Jeremy Solt
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-21 19:57 UTC (permalink / raw)
  To: refpolicy


Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 6cc0049... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   40 ++++
 policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
 4 files changed, 638 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..d88b5ff
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,40 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode	--	gen_context(system_u:object_r:hadoop_datanode_initrc_lock_t,s0)
+/var/lock/subsys/hadoop-namenode	--	gen_context(system_u:object_r:hadoop_namenode_initrc_lock_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop								-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-datanode.pid	--	gen_context(system_u:object_r:hadoop_datanode_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-namenode.pid	--	gen_context(system_u:object_r:hadoop_namenode_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..6cc0049
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,247 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	corecmd_exec_all_executables(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..53a242b
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,347 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+# What or who runs this?
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_server_t)
+')
-- 
1.7.2.3


* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-10-06 10:25 Dominick Grift
  2010-10-06 15:54 ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-10-06 10:25 UTC (permalink / raw)
  To: refpolicy

Some more suggested changes. One of them is to no longer allow the hadoop rc script domains and the hadoop domain to write log files (append only).
I wonder whether this revision still works for you.
I am also still wondering about the file context specifications. If I am correct, you stated that some of them do not work for one reason or another. We should really try to make them all work; otherwise it does not make sense to specify them in the first place.
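As an illustration of the append-only logging approach (a sketch, not part of the patch itself), the template grants each rc script domain create/append/read/setattr on its per-domain log type and deliberately omits write access:

	# append-only log rules as used in hadoop_domain_template()
	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
	# no manage_files_pattern()/write permission, so existing log files can only be appended to

The file context specifications can be checked against an installed system with matchpathcon or restorecon -nv; an entry that never matches a real path should be corrected or dropped rather than kept for completeness.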

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 73163db... M	policy/modules/kernel/corenetwork.te.in
:100644 100644 cad05ff... d2bc2b1... M	policy/modules/roles/sysadm.te
:000000 100644 0000000... 5935162... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... cee7cd5... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 515d2da... A	policy/modules/services/hadoop.te
:100644 100644 f976344... f1e6c9f... M	policy/modules/system/unconfined.te
 policy/modules/kernel/corenetwork.te.in |    5 +
 policy/modules/roles/sysadm.te          |    8 +
 policy/modules/services/hadoop.fc       |   55 ++++
 policy/modules/services/hadoop.if       |  364 +++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  410 +++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 +
 6 files changed, 850 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..73163db 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_datanode, tcp, 50010,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..d2bc2b1 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5935162
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,55 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+# Why do these regular expressions differ from the ones below (/etc/rc.d/init.d)? Which of the two works best?
+/etc/init\.d/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper.*(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid			--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid			--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..cee7cd5
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,364 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t, hadoop_hsperfdata_t, hadoop_etc_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	can_exec(hadoop_$1_initrc_t, hadoop_etc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	files_dontaudit_list_default(hadoop_$1_initrc_t)
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	consoletype_exec(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t self:process execmem;
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, { file dir })
+	files_search_var_lib(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+
+	append_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_t)
+
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr_dir_perms;
+	files_search_pids(hadoop_$1_t)
+
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+	can_exec(hadoop_$1_t, hadoop_etc_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	# Hadoop uses high ordered random ports for services
+	# If permanent ports are chosen, remove line below and lock down
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+
+	files_read_etc_files(hadoop_$1_t)
+
+	miscfiles_read_localization(hadoop_$1_t)
+
+	sysnet_read_config(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	hadoop_domtrans_zookeeper_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Read hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_read_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	read_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+	read_lnk_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
+	can_exec($1, hadoop_etc_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..515d2da
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,410 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { signal_perms setrlimit execmem };
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+allow hadoop_t hadoop_domain:process signull;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+# not sure:
+files_search_var_lib(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+# not sure:
+logging_search_logs(hadoop_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+# not sure:
+files_search_pids(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_list_default(hadoop_t)
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+# Seems a bit coarse
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_datanode_server_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_datanode_t)
+corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+
+fs_getattr_xattr_fs(hadoop_datanode_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_zope_server_packets(hadoop_jobtracker_t)
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_namenode_t)
+corenet_sendrecv_hadoop_namenode_server_packets(hadoop_namenode_t)
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_secondarynamenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t)
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_zope_client_packets(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+
+fs_associate(hadoop_tasktracker_t)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched signal_perms execmem };
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_dontaudit_list_default(zookeeper_t)
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched signal_perms };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..f1e6c9f 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy
-- 
1.7.2.3


^ permalink raw reply related	[flat|nested] 37+ messages in thread
* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-10-06 10:06 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-10-06 10:06 UTC (permalink / raw)
  To: refpolicy

I tried to clean up the latest revision a bit. There are also a few things that leave me wondering, most of which I commented on.

My take on hadoop.
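
For orientation, all of the per-daemon domains below are stamped out of one template. A minimal sketch of what instantiating it looks like, assuming the hadoop_domain_template() defined in hadoop.if of this patch (the "balancer" daemon name is purely hypothetical and not part of the patch):

	# Hypothetical sketch: declaring one more Hadoop daemon in hadoop.te.
	# The template provides hadoop_balancer_t and hadoop_balancer_initrc_t,
	# plus the matching lock, log, var_lib, pid and tmp types, and applies
	# the shared init script and daemon rules to them.
	hadoop_domain_template(balancer)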

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 73163db... M	policy/modules/kernel/corenetwork.te.in
:100644 100644 cad05ff... d2bc2b1... M	policy/modules/roles/sysadm.te
:000000 100644 0000000... 5935162... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 69519f0... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 6a79d31... A	policy/modules/services/hadoop.te
:100644 100644 f976344... f1e6c9f... M	policy/modules/system/unconfined.te
 policy/modules/kernel/corenetwork.te.in |    5 +
 policy/modules/roles/sysadm.te          |    8 +
 policy/modules/services/hadoop.fc       |   55 ++++
 policy/modules/services/hadoop.if       |  358 +++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  411 +++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 +
 6 files changed, 845 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..73163db 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_datanode, tcp, 50010,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..d2bc2b1 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5935162
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,55 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+# Why do these regular expressions differ from the ones below (/etc/rc.d/init.d)? Which of the two works best?
+/etc/init\.d/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper.*(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid			--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid			--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..69519f0
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,358 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t, hadoop_hsperfdata_t, hadoop_etc_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, { dir file })
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	can_exec(hadoop_$1_initrc_t, hadoop_etc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	files_dontaudit_list_default(hadoop_$1_initrc_t)
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	consoletype_exec(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t self:process execmem;
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, { file dir })
+	files_search_var_lib(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_t)
+
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr_dir_perms;
+	files_search_pids(hadoop_$1_t)
+
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+	can_exec(hadoop_$1_t, hadoop_etc_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	# Hadoop uses high ordered random ports for services
+	# If permanent ports are chosen, remove line below and lock down
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+
+	files_read_etc_files(hadoop_$1_t)
+
+	miscfiles_read_localization(hadoop_$1_t)
+
+	sysnet_read_config(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	hadoop_domtrans_zookeeper_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Read hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_read_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	read_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+	read_lnk_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop configuration files.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_config_files',`
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	files_search_etc($1)
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file read_lnk_file_perms;
+	can_exec($1, hadoop_etc_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..6a79d31
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,411 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { signal_perms setrlimit execmem };
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+allow hadoop_t hadoop_domain:process signull;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+# not sure:
+files_search_var_lib(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+# not sure:
+logging_search_logs(hadoop_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+# not sure:
+files_search_pids(hadoop_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_list_default(hadoop_t)
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+# Seems a bit coarse
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_datanode_server_packets(hadoop_datanode_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_datanode_t)
+corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+
+fs_getattr_xattr_fs(hadoop_datanode_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_jobtracker_t)
+corenet_sendrecv_zope_server_packets(hadoop_jobtracker_t)
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_namenode_t)
+corenet_sendrecv_hadoop_namenode_server_packets(hadoop_namenode_t)
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_secondarynamenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t)
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+
+corenet_sendrecv_hadoop_datanode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_tasktracker_t)
+corenet_sendrecv_zope_client_packets(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+
+fs_associate(hadoop_tasktracker_t)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched signal_perms execmem };
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_dontaudit_list_default(zookeeper_t)
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched signal_perms };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..f1e6c9f 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 37+ messages in thread
* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-23 14:53 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-23 14:53 UTC (permalink / raw)
  To: refpolicy

Fixed the required exec type for hadoop in hadoop_domtrans.

Moved some rules from the rc domains to the main domains, as I suspect it is not the rc scripts that require them.

Maybe some other trivial changes.
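
For illustration, the intended call sites for these interfaces are role or unconfined modules; a minimal usage sketch, assuming the hadoop_run() and zookeeper_run_client() interfaces from hadoop.if below (other revisions in this thread wire this up for unconfined_t and sysadm_t):

	optional_policy(`
		# Let the administrator role transition into the hadoop
		# client domain and manage the resulting processes.
		hadoop_run(sysadm_t, sysadm_r)
	')

	optional_policy(`
		# Same idea for the zookeeper command line client.
		zookeeper_run_client(sysadm_t, sysadm_r)
	')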

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... d88b5ff... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 5c66ae4... A	policy/modules/services/hadoop.if
:000000 100644 0000000... e947a6b... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   40 ++++
 policy/modules/services/hadoop.if       |  241 +++++++++++++++++++++
 policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
 4 files changed, 632 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..d88b5ff
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,40 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_t,s0)
+
+/var/lock/subsys/hadoop-datanode	--	gen_context(system_u:object_r:hadoop_datanode_initrc_lock_t,s0)
+/var/lock/subsys/hadoop-namenode	--	gen_context(system_u:object_r:hadoop_namenode_initrc_lock_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop								-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-datanode.pid	--	gen_context(system_u:object_r:hadoop_datanode_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-namenode.pid	--	gen_context(system_u:object_r:hadoop_namenode_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..5c66ae4
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,241 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	append_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_t)
+
+	allow hadoop_$1_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..e947a6b
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,347 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_server_t)
+')
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 37+ messages in thread
* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-21 16:29 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-21 16:29 UTC (permalink / raw)
  To: refpolicy

Here is a new version with most of the feedback applied.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... 896ceda... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 2a5fe66... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 53a242b... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   35 +++
 policy/modules/services/hadoop.if       |  247 ++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  347 +++++++++++++++++++++++++++++++
 4 files changed, 633 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..896ceda
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,35 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?		gen_context(system_u:object_r:hadoop_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..2a5fe66
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,247 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+	domain_transition_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	corecmd_exec_all_executables(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..53a242b
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,347 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+# What or who runs this?
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_server_t)
+')
-- 
1.7.2.3

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-21  9:02 Dominick Grift
  2010-09-21 15:42 ` Paul Nuzzi
  0 siblings, 1 reply; 37+ messages in thread
From: Dominick Grift @ 2010-09-21  9:02 UTC (permalink / raw)
  To: refpolicy

Well, I have rewritten the policy as much as I can with the information that I currently have.
Because of the use of the hadoop domain attributes I cannot determine whether it is the initrc script or the application that is doing something, and so I cannot currently finish the hadoop_domain_template policy.
Also, I have no clue what transitions to the hadoop_t domain. It does not own an initrc script, so I gather it is not an init daemon domain. Must it be an application domain then?
A lot of other things aren't clear and/or make no sense.
I have also left out things that I think should be handled differently.

It would be cool if someone could test this policy and provide feedback in the shape of AVC denials.
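
One low-impact way to gather those denials, not part of the patch itself, is to rebuild the module with the permissive declarations that already sit commented out in hadoop.te enabled, so that AVC denials are logged but not enforced, for example:

permissive hadoop_t;
permissive zookeeper_t;
permissive zookeeper_server_t;

The hadoop_domain_template carries matching commented permissive lines for the per-daemon domains (datanode, jobtracker, namenode, secondarynamenode, tasktracker).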

Some properties of this policy:

The hadoop init script domains must be started by the system, or by unconfined_t or sysadm_t using run_init (for example: run_init service <hadoop service> start).
To use the zookeeper client domain, the zookeeper_run_client interface must be called for a domain (for example, if you wish to run it as unconfined_t, you would call zookeeper_run_client(unconfined_t, unconfined_r)).
The zookeeper server seems to be an ordinary init daemon domain.
Since I do not know what kind of domain hadoop_t is, it is currently pretty much unreachable. I have created a hadoop_domtrans interface that can be called, but currently no role is allowed the hadoop_t domain; a short usage sketch follows below.
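
As an illustration only (this snippet is not part of the patch), a hypothetical local module, here called myhadoop, could wire those interfaces up roughly like this, assuming unconfined_t and unconfined_r exist in the target policy:

policy_module(myhadoop, 0.0.1)

gen_require(`
	type unconfined_t;
	role unconfined_r;
')

# Run the zookeeper client from unconfined_t in the zookeeper_t domain.
zookeeper_run_client(unconfined_t, unconfined_r)

# Let unconfined_t start the zookeeper server through its labeled init script.
zookeeper_initrc_domtrans_server(unconfined_t)

# Transition to hadoop_t when the hadoop binary is executed and
# allow unconfined_r the hadoop_t domain.
hadoop_run(unconfined_t, unconfined_r)

Only a call like hadoop_run or hadoop_domtrans makes hadoop_t reachable, which is why no role is allowed it by default.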

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... bce5d29... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 462d851... A	policy/modules/services/hadoop.if
:000000 100644 0000000... 880f09a... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   34 +++
 policy/modules/services/hadoop.if       |  294 +++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  339 +++++++++++++++++++++++++++++++
 4 files changed, 671 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..bce5d29
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,34 @@
+/etc/hadoop.*(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_data_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?		gen_context(system_u:object_r:hadoop_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..462d851
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,294 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+
+	hadoop_exec_entry_type(hadoop_$1_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	# This will need a file context specification.
+	type hadoop_$1_initrc_lock_t;
+	files_lock_file(hadoop_$1_initrc_lock_t)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	# permissive hadoop_$1_t;
+	# permissive hadoop_$1_initrc_t;
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+	append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_var_run_t:file manage_file_perms;
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+	files_search_pids(hadoop_$1_initrc_t)
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+	hadoop_spec_domtrans(hadoop_$1_initrc_t, hadoop_$1_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+
+	corecmd_exec_all_executables(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_initrc_t)
+	libs_use_shared_libs(hadoop_$1_initrc_t)
+
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+
+	# This can be removed on anything post-el5
+	libs_use_ld_so(hadoop_$1_t)
+	libs_use_shared_libs(hadoop_$1_t)
+
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Make hadoop executable files an
+##	entrypoint for the specified domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	The domain for which hadoop_exec_t
+##	is an entrypoint.
+##	</summary>
+## </param>
+#
+interface(`hadoop_exec_entry_type',`
+	gen_require(`
+		type hadoop_exec_t;
+	')
+
+	domain_entry_file($1, hadoop_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop executable files
+##	in the specified domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="target_domain">
+##	<summary>
+##	Domain to transition to.
+##	</summary>
+## </param>
+#
+interface(`hadoop_spec_domtrans',`
+	gen_require(`
+		type hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domain_transition_pattern($1, hadoop_exec_t, $2)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..880f09a
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,339 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+# What or who runs this?
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(hadoop_t)
+')
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+	# Java might not be optional
+	java_exec(zookeeper_t)
+')
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
-- 
1.7.2.3

* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-20 22:24 Dominick Grift
  0 siblings, 0 replies; 37+ messages in thread
From: Dominick Grift @ 2010-09-20 22:24 UTC (permalink / raw)
  To: refpolicy

I made a start on my take on this policy. This is what I have so far.

I still have a lot of questions though.

Signed-off-by: Dominick Grift <domg472@gmail.com>
---
:100644 100644 2ecdde8... 7a1b5de... M	policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... 3ae8107... A	policy/modules/services/hadoop.fc
:000000 100644 0000000... 7e43690... A	policy/modules/services/hadoop.if
:000000 100644 0000000... b132803... A	policy/modules/services/hadoop.te
 policy/modules/kernel/corenetwork.te.in |    4 +
 policy/modules/services/hadoop.fc       |   35 ++++
 policy/modules/services/hadoop.if       |  332 +++++++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  219 ++++++++++++++++++++
 4 files changed, 590 insertions(+), 0 deletions(-)

diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..3ae8107
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,35 @@
+/etc/hadoop(/.*)?			gen_context(system_u:object_r:hadoop_etc_t,s0)
+/etc/hadoop-0.20(/.*)?		gen_context(system_u:object_r:hadoop_etc_t,s0)
+/etc/zookeeper(/.*)?		gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?	gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode			--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode			--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop	--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client		--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server		--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?				gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?												gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?					gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?					gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?			gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_data_t,s0)
+
+/var/log/hadoop(.*)?										gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?			gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?			gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?			gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)?									gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?		gen_context(system_u:object_r:hadoop_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..7e43690
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,332 @@
+## <summary>The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing.</summary>
+## <desc>
+##	<p>
+##		Hadoop Common: The common utilities that support the
+##		other Hadoop subprojects.
+##	</p>
+##	<p>
+##		Chukwa: A data collection system for managing large
+##		distributed systems.
+##	</p>
+##	<p>
+##		HBase: A scalable, distributed database that supports
+##		structured data storage for large tables.
+##	</p>
+##	<p>
+##		HDFS: A distributed file system that provides high
+##		throughput access to application data.
+##	</p>
+##	<p>
+##		Hive: A data warehouse infrastructure that provides
+##		data summarization and ad hoc querying.
+##	</p>
+##	<p>
+##		MapReduce: A software framework for distributed
+##		processing of large data sets on compute clusters.
+##	</p>
+##	<p>
+##		Pig: A high-level data-flow language and execution
+##		framework for parallel computation.
+##	</p>
+##	<p>
+##		ZooKeeper: A high-performance coordination service for
+##		distributed applications.
+##	</p>
+## </desc>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <desc>
+##	<p>
+##	This template creates a domain to be used for
+##	a new hadoop daemon.
+##	</p>
+## </desc>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain, hadoop_initrc_domain, hadoop_lib_file;
+		attribute hadoop_pid_file, hadoop_log_file, hadoop_tmp_file;
+		type hadoop_var_lib_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+
+	type hadoop_$1_initrc_t, hadoop_initrc_domain;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+	role system_r types hadoop_$1_initrc_t;
+
+	type hadoop_$1_var_lib_t, hadoop_lib_file;
+	files_type(hadoop_$1_var_lib_t)
+
+	type hadoop_$1_log_t, hadoop_log_file;
+	logging_log_file(hadoop_$1_log_t)
+
+	# This will need a file context specification.
+	type hadoop_$1_var_run_t, hadoop_pid_file;
+	files_pid_file(hadoop_$1_var_run_t)
+
+	type hadoop_$1_tmp_t, hadoop_tmp_file;
+	files_tmp_file(hadoop_$1_tmp_t)
+
+	####################################
+	#
+	# Shared policy.
+	#
+
+	allow hadoop_domain self:process signull;
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+#
+interface(`zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	zookeeper_domtrans_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute the hadoop executable file
+##	in the specified domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="target_domain">
+##	<summary>
+##	Domain to transition to.
+##	</summary>
+## </param>
+#
+interface(`hadoop_spec_domtrans',`
+	gen_require(`
+		type hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domain_transition_pattern($1, hadoop_exec_t, $2)
+')
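+
+# Example (illustrative, hypothetical caller): an init script domain
+# created by hadoop_domain_template() could start its daemon through
+# the hadoop wrapper with
+#	hadoop_spec_domtrans(hadoop_datanode_initrc_t, hadoop_datanode_t)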
+
+########################################
+## <summary>
+##	Create objects in hadoop lib
+##	directories with a private type.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_var_lib_filetrans',`
+	gen_require(`
+		type hadoop_var_lib_t;
+	')
+
+	files_search_var_lib($1)
+	filetrans_pattern($1, hadoop_var_lib_t, $2, $3)
+')
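+
+# Example (illustrative): giving a daemon a private directory under the
+# shared hadoop lib location could look like
+#	hadoop_var_lib_filetrans(hadoop_datanode_t, hadoop_datanode_var_lib_t, dir)
+# The log, pid and tmp interfaces below follow the same pattern.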
+
+########################################
+## <summary>
+##	Create objects in hadoop log
+##	directories with a private type.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_log_filetrans',`
+	gen_require(`
+		type hadoop_log_t;
+	')
+
+	logging_search_logs($1)
+	filetrans_pattern($1, hadoop_log_t, $2, $3)
+')
+
+########################################
+## <summary>
+##	Create objects in hadoop pid
+##	directories with a private type.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_pid_filetrans',`
+	gen_require(`
+		type hadoop_var_run_t;
+	')
+
+	files_search_pids($1)
+	filetrans_pattern($1, hadoop_var_run_t, $2, $3)
+')
+
+########################################
+## <summary>
+##	Create objects in hadoop temporary
+##	directories with a private type.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed access.
+##	</summary>
+## </param>
+## <param name="private type">
+##	<summary>
+##	The type of the object to be created.
+##	</summary>
+## </param>
+## <param name="object">
+##	<summary>
+##	The object class of the object being created.
+##	</summary>
+## </param>
+#
+interface(`hadoop_tmp_filetrans',`
+	gen_require(`
+		type hadoop_tmp_t;
+	')
+
+	files_search_tmp($1)
+	filetrans_pattern($1, hadoop_tmp_t, $2, $3)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..b132803
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,219 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Global declarations.
+#
+
+attribute hadoop_domain;
+attribute hadoop_initrc_domain;
+attribute hadoop_log_file;
+attribute hadoop_pid_file;
+attribute hadoop_lib_file;
+attribute hadoop_tmp_file;
+
+########################################
+#
+# Hadoop declarations.
+#
+
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+########################################
+#
+# Hadoop datanode declarations.
+#
+
+hadoop_domain_template(datanode)
+
+########################################
+#
+# Hadoop jobtracker declarations.
+#
+
+hadoop_domain_template(jobtracker)
+
+########################################
+#
+# Hadoop namenode declarations.
+#
+
+hadoop_domain_template(namenode)
+
+########################################
+#
+# Hadoop secondary namenode declarations.
+#
+
+hadoop_domain_template(secondarynamenode)
+
+########################################
+#
+# Hadoop tasktracker declarations.
+#
+
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Global policy.
+#
+
+########################################
+#
+# Hadoop policy.
+#
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { getsched execmem sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, dir)
+
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
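+# ZooKeeper defaults: clients connect on port 2181, quorum members
+# connect to the leader on 2888, and leader election uses 3888.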
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
-- 
1.7.2.3

^ permalink raw reply related	[flat|nested] 37+ messages in thread
* [refpolicy] [PATCH] hadoop 1/10 -- unconfined
@ 2010-09-20 14:34 Paul Nuzzi
  2010-09-20 17:03 ` Dominick Grift
  2010-09-20 19:01 ` Dominick Grift
  0 siblings, 2 replies; 37+ messages in thread
From: Paul Nuzzi @ 2010-09-20 14:34 UTC (permalink / raw)
  To: refpolicy

I fixed the hadoop patch based on all of the feedback I received. I added role support for sysadm_r to all of the services and programs. Steve and I were not able to use init_script_domain successfully: the interface didn't provide what we needed and it was also causing problems with sysadm_r, so I had to patch unconfined.if with a role transition interface. I split the patch up into a series since the original was huge.
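
A minimal sketch of how the new interface might be wired up from the hadoop policy (illustrative only; the actual call sites are in the later patches of this series):

	optional_policy(`
		unconfined_roletrans(hadoop_datanode_initrc_t)
	')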

Signed-off-by: Paul Nuzzi <pjnuzzi@tycho.ncsc.mil>

---
 policy/modules/system/unconfined.if |   25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/policy/modules/system/unconfined.if b/policy/modules/system/unconfined.if
index 416e668..3364eb3 100644
--- a/policy/modules/system/unconfined.if
+++ b/policy/modules/system/unconfined.if
@@ -279,6 +279,31 @@ interface(`unconfined_domtrans_to',`
 
 ########################################
 ## <summary>
+##	Allow programs to enter the specified
+##	domain through the unconfined role.
+## </summary>
+## <desc>
+##	<p>
+##	Associate the specified domain with the unconfined
+##	role so programs may run in it from that role.
+##	</p>
+## </desc>
+## <param name="domain">
+##	<summary>
+##	Domain to execute in.
+##	</summary>
+## </param>
+#
+interface(`unconfined_roletrans',`
+	gen_require(`
+		role unconfined_r;
+	')
+
+	role unconfined_r types $1;
+')
+
+########################################
+## <summary>
 ##	Allow unconfined to execute the specified program in
 ##	the specified domain.  Allow the specified domain the
 ##	unconfined role and use of unconfined user terminals.

^ permalink raw reply related	[flat|nested] 37+ messages in thread

end of thread, other threads:[~2010-10-07 16:35 UTC | newest]

Thread overview: 37+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-09-21 19:57 [refpolicy] [PATCH] hadoop 1/10 -- unconfined Dominick Grift
2010-09-21 20:04 ` Jeremy Solt
2010-09-23 13:13   ` Paul Nuzzi
2010-09-24 14:20     ` Jeremy Solt
2010-09-27 18:50       ` Paul Nuzzi
2010-09-30 19:39         ` Paul Nuzzi
2010-10-01 12:02           ` Dominick Grift
2010-10-01 15:17             ` Paul Nuzzi
2010-10-01 17:56               ` Christopher J. PeBenito
2010-10-04 17:15                 ` Paul Nuzzi
2010-10-04 18:18                   ` Christopher J. PeBenito
2010-10-05 19:59                     ` Paul Nuzzi
2010-10-07 14:41                       ` Chris PeBenito
2010-10-07 16:35                         ` Paul Nuzzi
2010-10-01 18:01               ` Dominick Grift
2010-10-01 19:06                 ` Paul Nuzzi
  -- strict thread matches above, loose matches on Subject: below --
2010-10-06 10:25 Dominick Grift
2010-10-06 15:54 ` Paul Nuzzi
2010-10-06 17:34   ` Dominick Grift
2010-10-06 10:06 Dominick Grift
2010-09-23 14:53 Dominick Grift
2010-09-21 16:29 Dominick Grift
2010-09-21  9:02 Dominick Grift
2010-09-21 15:42 ` Paul Nuzzi
2010-09-21 16:14   ` Dominick Grift
2010-09-21 16:34     ` Paul Nuzzi
2010-09-21 17:08       ` Dominick Grift
2010-09-23 13:54         ` Paul Nuzzi
2010-09-23 14:40           ` Dominick Grift
2010-09-21 19:55       ` Jeremy Solt
2010-09-20 22:24 Dominick Grift
2010-09-20 14:34 Paul Nuzzi
2010-09-20 17:03 ` Dominick Grift
2010-09-20 18:02   ` Paul Nuzzi
2010-09-20 19:33     ` Dominick Grift
2010-09-20 19:50       ` Dominick Grift
2010-09-20 19:01 ` Dominick Grift
