Diffstat (limited to 'security')
29 files changed, 786 insertions, 179 deletions
| diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c index 4304372b323f..106e855e2d9d 100644 --- a/security/integrity/digsig.c +++ b/security/integrity/digsig.c @@ -51,7 +51,7 @@ static bool init_keyring __initdata;  int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,  			    const char *digest, int digestlen)  { -	if (id >= INTEGRITY_KEYRING_MAX) +	if (id >= INTEGRITY_KEYRING_MAX || siglen < 2)  		return -EINVAL;  	if (!keyring[id]) { diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index bf663915412e..d7f282d75cc1 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -151,8 +151,16 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,  	memset(&hmac_misc, 0, sizeof(hmac_misc));  	hmac_misc.ino = inode->i_ino;  	hmac_misc.generation = inode->i_generation; -	hmac_misc.uid = from_kuid(inode->i_sb->s_user_ns, inode->i_uid); -	hmac_misc.gid = from_kgid(inode->i_sb->s_user_ns, inode->i_gid); +	/* The hmac uid and gid must be encoded in the initial user +	 * namespace (not the filesystems user namespace) as encoding +	 * them in the filesystems user namespace allows an attack +	 * where first they are written in an unprivileged fuse mount +	 * of a filesystem and then the system is tricked to mount the +	 * filesystem for real on next boot and trust it because +	 * everything is signed. +	 */ +	hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid); +	hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);  	hmac_misc.mode = inode->i_mode;  	crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));  	if (evm_hmac_attrs & EVM_ATTR_FSUUID) diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index ba8615576d4d..e2ed498c0f5f 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -145,6 +145,10 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,  	/* check value type */  	switch (xattr_data->type) {  	case EVM_XATTR_HMAC: +		if (xattr_len != sizeof(struct evm_ima_xattr_data)) { +			evm_status = INTEGRITY_FAIL; +			goto out; +		}  		rc = evm_calc_hmac(dentry, xattr_name, xattr_value,  				   xattr_value_len, calc.digest);  		if (rc) diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 5487827fa86c..370eb2f4dd37 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -27,6 +27,18 @@ config IMA  	  to learn more about IMA.  	  If unsure, say N. +config IMA_KEXEC +	bool "Enable carrying the IMA measurement list across a soft boot" +	depends on IMA && TCG_TPM && HAVE_IMA_KEXEC +	default n +	help +	   TPM PCRs are only reset on a hard reboot.  In order to validate +	   a TPM's quote after a soft boot, the IMA measurement list of the +	   running kernel must be saved and restored on boot. + +	   Depending on the IMA policy, the measurement list can grow to +	   be very large. 
+  config IMA_MEASURE_PCR_IDX  	int  	depends on IMA diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile index 9aeaedad1e2b..29f198bde02b 100644 --- a/security/integrity/ima/Makefile +++ b/security/integrity/ima/Makefile @@ -8,4 +8,5 @@ obj-$(CONFIG_IMA) += ima.o  ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \  	 ima_policy.o ima_template.o ima_template_lib.o  ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o +ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o  obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index db25f54a04fe..5e6180a4da7d 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -28,6 +28,10 @@  #include "../integrity.h" +#ifdef CONFIG_HAVE_IMA_KEXEC +#include <asm/ima.h> +#endif +  enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,  		     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };  enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; @@ -81,6 +85,7 @@ struct ima_template_field {  /* IMA template descriptor definition */  struct ima_template_desc { +	struct list_head list;  	char *name;  	char *fmt;  	int num_fields; @@ -102,6 +107,27 @@ struct ima_queue_entry {  };  extern struct list_head ima_measurements;	/* list of all measurements */ +/* Some details preceding the binary serialized measurement list */ +struct ima_kexec_hdr { +	u16 version; +	u16 _reserved0; +	u32 _reserved1; +	u64 buffer_size; +	u64 count; +}; + +#ifdef CONFIG_HAVE_IMA_KEXEC +void ima_load_kexec_buffer(void); +#else +static inline void ima_load_kexec_buffer(void) {} +#endif /* CONFIG_HAVE_IMA_KEXEC */ + +/* + * The default binary_runtime_measurements list format is defined as the + * platform native format.  The canonical format is defined as little-endian. 
+ */ +extern bool ima_canonical_fmt; +  /* Internal IMA function definitions */  int ima_init(void);  int ima_fs_init(void); @@ -122,7 +148,12 @@ int ima_init_crypto(void);  void ima_putc(struct seq_file *m, void *data, int datalen);  void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);  struct ima_template_desc *ima_template_desc_current(void); +int ima_restore_measurement_entry(struct ima_template_entry *entry); +int ima_restore_measurement_list(loff_t bufsize, void *buf); +int ima_measurements_show(struct seq_file *m, void *v); +unsigned long ima_get_binary_runtime_size(void);  int ima_init_template(void); +void ima_init_template_list(void);  /*   * used to protect h_table and sha_table diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 389325ac6067..1fd9539a969d 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -130,6 +130,7 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,  				 int xattr_len)  {  	struct signature_v2_hdr *sig; +	enum hash_algo ret;  	if (!xattr_value || xattr_len < 2)  		/* return default hash algo */ @@ -143,7 +144,9 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,  		return sig->hash_algo;  		break;  	case IMA_XATTR_DIGEST_NG: -		return xattr_value->digest[0]; +		ret = xattr_value->digest[0]; +		if (ret < HASH_ALGO__LAST) +			return ret;  		break;  	case IMA_XATTR_DIGEST:  		/* this is for backward compatibility */ @@ -384,14 +387,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,  	result = ima_protect_xattr(dentry, xattr_name, xattr_value,  				   xattr_value_len);  	if (result == 1) { -		bool digsig; -  		if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))  			return -EINVAL; -		digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG); -		if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE)) -			return -EPERM; -		ima_reset_appraise_flags(d_backing_inode(dentry), digsig); +		ima_reset_appraise_flags(d_backing_inode(dentry), +			 (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);  		result = 0;  	}  	return result; diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index 38f2ed830dd6..802d5d20f36f 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -477,11 +477,13 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,  		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };  		u8 *data_to_hash = field_data[i].data;  		u32 datalen = field_data[i].len; +		u32 datalen_to_hash = +		    !ima_canonical_fmt ? 
datalen : cpu_to_le32(datalen);  		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {  			rc = crypto_shash_update(shash, -						(const u8 *) &field_data[i].len, -						sizeof(field_data[i].len)); +						(const u8 *) &datalen_to_hash, +						sizeof(datalen_to_hash));  			if (rc)  				break;  		} else if (strcmp(td->fields[i]->field_id, "n") == 0) { diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index c07a3844ea0a..ca303e5d2b94 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -28,6 +28,16 @@  static DEFINE_MUTEX(ima_write_mutex); +bool ima_canonical_fmt; +static int __init default_canonical_fmt_setup(char *str) +{ +#ifdef __BIG_ENDIAN +	ima_canonical_fmt = 1; +#endif +	return 1; +} +__setup("ima_canonical_fmt", default_canonical_fmt_setup); +  static int valid_policy = 1;  #define TMPBUFLEN 12  static ssize_t ima_show_htable_value(char __user *buf, size_t count, @@ -116,13 +126,13 @@ void ima_putc(struct seq_file *m, void *data, int datalen)   *       [eventdata length]   *       eventdata[n]=template specific data   */ -static int ima_measurements_show(struct seq_file *m, void *v) +int ima_measurements_show(struct seq_file *m, void *v)  {  	/* the list never shrinks, so we don't need a lock here */  	struct ima_queue_entry *qe = v;  	struct ima_template_entry *e;  	char *template_name; -	int namelen; +	u32 pcr, namelen, template_data_len; /* temporary fields */  	bool is_ima_template = false;  	int i; @@ -139,25 +149,29 @@ static int ima_measurements_show(struct seq_file *m, void *v)  	 * PCR used defaults to the same (config option) in  	 * little-endian format, unless set in policy  	 */ -	ima_putc(m, &e->pcr, sizeof(e->pcr)); +	pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr); +	ima_putc(m, &pcr, sizeof(e->pcr));  	/* 2nd: template digest */  	ima_putc(m, e->digest, TPM_DIGEST_SIZE);  	/* 3rd: template name size */ -	namelen = strlen(template_name); +	namelen = !ima_canonical_fmt ? strlen(template_name) : +		cpu_to_le32(strlen(template_name));  	ima_putc(m, &namelen, sizeof(namelen));  	/* 4th:  template name */ -	ima_putc(m, template_name, namelen); +	ima_putc(m, template_name, strlen(template_name));  	/* 5th:  template length (except for 'ima' template) */  	if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)  		is_ima_template = true; -	if (!is_ima_template) -		ima_putc(m, &e->template_data_len, -			 sizeof(e->template_data_len)); +	if (!is_ima_template) { +		template_data_len = !ima_canonical_fmt ? e->template_data_len : +			cpu_to_le32(e->template_data_len); +		ima_putc(m, &template_data_len, sizeof(e->template_data_len)); +	}  	/* 6th:  template specific data */  	for (i = 0; i < e->template_desc->num_fields; i++) { @@ -401,7 +415,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)  	const char *cause = valid_policy ? "completed" : "failed";  	if ((file->f_flags & O_ACCMODE) == O_RDONLY) -		return 0; +		return seq_release(inode, file);  	if (valid_policy && ima_check_policy() < 0) {  		cause = "failed"; diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 32912bd54ead..2967d497a665 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c @@ -115,7 +115,8 @@ int __init ima_init(void)  		ima_used_chip = 1;  	if (!ima_used_chip) -		pr_info("No TPM chip found, activating TPM-bypass!\n"); +		pr_info("No TPM chip found, activating TPM-bypass! 
(rc=%d)\n", +			rc);  	rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);  	if (rc) @@ -128,6 +129,8 @@ int __init ima_init(void)  	if (rc != 0)  		return rc; +	ima_load_kexec_buffer(); +  	rc = ima_add_boot_aggregate();	/* boot aggregate must be first entry */  	if (rc != 0)  		return rc; diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c new file mode 100644 index 000000000000..e473eee913cb --- /dev/null +++ b/security/integrity/ima/ima_kexec.c @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2016 IBM Corporation + * + * Authors: + * Thiago Jung Bauermann <[email protected]> + * Mimi Zohar <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#include <linux/seq_file.h> +#include <linux/vmalloc.h> +#include <linux/kexec.h> +#include "ima.h" + +#ifdef CONFIG_IMA_KEXEC +static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer, +				     unsigned long segment_size) +{ +	struct ima_queue_entry *qe; +	struct seq_file file; +	struct ima_kexec_hdr khdr; +	int ret = 0; + +	/* segment size can't change between kexec load and execute */ +	file.buf = vmalloc(segment_size); +	if (!file.buf) { +		ret = -ENOMEM; +		goto out; +	} + +	file.size = segment_size; +	file.read_pos = 0; +	file.count = sizeof(khdr);	/* reserved space */ + +	memset(&khdr, 0, sizeof(khdr)); +	khdr.version = 1; +	list_for_each_entry_rcu(qe, &ima_measurements, later) { +		if (file.count < file.size) { +			khdr.count++; +			ima_measurements_show(&file, qe); +		} else { +			ret = -EINVAL; +			break; +		} +	} + +	if (ret < 0) +		goto out; + +	/* +	 * fill in reserved space with some buffer details +	 * (eg. version, buffer size, number of measurements) +	 */ +	khdr.buffer_size = file.count; +	if (ima_canonical_fmt) { +		khdr.version = cpu_to_le16(khdr.version); +		khdr.count = cpu_to_le64(khdr.count); +		khdr.buffer_size = cpu_to_le64(khdr.buffer_size); +	} +	memcpy(file.buf, &khdr, sizeof(khdr)); + +	print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE, +			16, 1, file.buf, +			file.count < 100 ? file.count : 100, true); + +	*buffer_size = file.count; +	*buffer = file.buf; +out: +	if (ret == -EINVAL) +		vfree(file.buf); +	return ret; +} + +/* + * Called during kexec_file_load so that IMA can add a segment to the kexec + * image for the measurement list for the next kernel. + * + * This function assumes that kexec_mutex is held. + */ +void ima_add_kexec_buffer(struct kimage *image) +{ +	struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE, +				  .buf_min = 0, .buf_max = ULONG_MAX, +				  .top_down = true }; +	unsigned long binary_runtime_size; + +	/* use more understandable variable names than defined in kbuf */ +	void *kexec_buffer = NULL; +	size_t kexec_buffer_size; +	size_t kexec_segment_size; +	int ret; + +	/* +	 * Reserve an extra half page of memory for additional measurements +	 * added during the kexec load. 
+	 */ +	binary_runtime_size = ima_get_binary_runtime_size(); +	if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE) +		kexec_segment_size = ULONG_MAX; +	else +		kexec_segment_size = ALIGN(ima_get_binary_runtime_size() + +					   PAGE_SIZE / 2, PAGE_SIZE); +	if ((kexec_segment_size == ULONG_MAX) || +	    ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) { +		pr_err("Binary measurement list too large.\n"); +		return; +	} + +	ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer, +				  kexec_segment_size); +	if (!kexec_buffer) { +		pr_err("Not enough memory for the kexec measurement buffer.\n"); +		return; +	} + +	kbuf.buffer = kexec_buffer; +	kbuf.bufsz = kexec_buffer_size; +	kbuf.memsz = kexec_segment_size; +	ret = kexec_add_buffer(&kbuf); +	if (ret) { +		pr_err("Error passing over kexec measurement buffer.\n"); +		return; +	} + +	ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size); +	if (ret) { +		pr_err("Error passing over kexec measurement buffer.\n"); +		return; +	} + +	pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n", +		 kbuf.mem); +} +#endif /* IMA_KEXEC */ + +/* + * Restore the measurement list from the previous kernel. + */ +void ima_load_kexec_buffer(void) +{ +	void *kexec_buffer = NULL; +	size_t kexec_buffer_size = 0; +	int rc; + +	rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size); +	switch (rc) { +	case 0: +		rc = ima_restore_measurement_list(kexec_buffer_size, +						  kexec_buffer); +		if (rc != 0) +			pr_err("Failed to restore the measurement list: %d\n", +				rc); + +		ima_free_kexec_buffer(); +		break; +	case -ENOTSUPP: +		pr_debug("Restoring the measurement list not supported\n"); +		break; +	case -ENOENT: +		pr_debug("No measurement list to restore\n"); +		break; +	default: +		pr_debug("Error restoring the measurement list: %d\n", rc); +	} +} diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 423d111b3b94..50818c60538b 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -418,6 +418,7 @@ static int __init init_ima(void)  {  	int error; +	ima_init_template_list();  	hash_setup(CONFIG_IMA_DEFAULT_HASH);  	error = ima_init();  	if (!error) { diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c index 32f6ac0f96df..d9aa5ab71204 100644 --- a/security/integrity/ima/ima_queue.c +++ b/security/integrity/ima/ima_queue.c @@ -29,6 +29,11 @@  #define AUDIT_CAUSE_LEN_MAX 32  LIST_HEAD(ima_measurements);	/* list of all measurements */ +#ifdef CONFIG_IMA_KEXEC +static unsigned long binary_runtime_size; +#else +static unsigned long binary_runtime_size = ULONG_MAX; +#endif  /* key: inode (before secure-hashing a file) */  struct ima_h_table ima_htable = { @@ -64,12 +69,32 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,  	return ret;  } +/* + * Calculate the memory required for serializing a single + * binary_runtime_measurement list entry, which contains a + * couple of variable length fields (e.g template name and data). + */ +static int get_binary_runtime_size(struct ima_template_entry *entry) +{ +	int size = 0; + +	size += sizeof(u32);	/* pcr */ +	size += sizeof(entry->digest); +	size += sizeof(int);	/* template name size field */ +	size += strlen(entry->template_desc->name) + 1; +	size += sizeof(entry->template_data_len); +	size += entry->template_data_len; +	return size; +} +  /* ima_add_template_entry helper function: - * - Add template entry to measurement list and hash table. 
+ * - Add template entry to the measurement list and hash table, for + *   all entries except those carried across kexec.   *   * (Called with ima_extend_list_mutex held.)   */ -static int ima_add_digest_entry(struct ima_template_entry *entry) +static int ima_add_digest_entry(struct ima_template_entry *entry, +				bool update_htable)  {  	struct ima_queue_entry *qe;  	unsigned int key; @@ -85,11 +110,34 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)  	list_add_tail_rcu(&qe->later, &ima_measurements);  	atomic_long_inc(&ima_htable.len); -	key = ima_hash_key(entry->digest); -	hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); +	if (update_htable) { +		key = ima_hash_key(entry->digest); +		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); +	} + +	if (binary_runtime_size != ULONG_MAX) { +		int size; + +		size = get_binary_runtime_size(entry); +		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ? +		     binary_runtime_size + size : ULONG_MAX; +	}  	return 0;  } +/* + * Return the amount of memory required for serializing the + * entire binary_runtime_measurement list, including the ima_kexec_hdr + * structure. + */ +unsigned long ima_get_binary_runtime_size(void) +{ +	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr))) +		return ULONG_MAX; +	else +		return binary_runtime_size + sizeof(struct ima_kexec_hdr); +}; +  static int ima_pcr_extend(const u8 *hash, int pcr)  {  	int result = 0; @@ -103,8 +151,13 @@ static int ima_pcr_extend(const u8 *hash, int pcr)  	return result;  } -/* Add template entry to the measurement list and hash table, - * and extend the pcr. +/* + * Add template entry to the measurement list and hash table, and + * extend the pcr. + * + * On systems which support carrying the IMA measurement list across + * kexec, maintain the total memory size required for serializing the + * binary_runtime_measurements.   
*/  int ima_add_template_entry(struct ima_template_entry *entry, int violation,  			   const char *op, struct inode *inode, @@ -126,7 +179,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,  		}  	} -	result = ima_add_digest_entry(entry); +	result = ima_add_digest_entry(entry, 1);  	if (result < 0) {  		audit_cause = "ENOMEM";  		audit_info = 0; @@ -149,3 +202,13 @@ out:  			    op, audit_cause, result, audit_info);  	return result;  } + +int ima_restore_measurement_entry(struct ima_template_entry *entry) +{ +	int result = 0; + +	mutex_lock(&ima_extend_list_mutex); +	result = ima_add_digest_entry(entry, 0); +	mutex_unlock(&ima_extend_list_mutex); +	return result; +} diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c index febd12ed9b55..cebb37c63629 100644 --- a/security/integrity/ima/ima_template.c +++ b/security/integrity/ima/ima_template.c @@ -15,16 +15,20 @@  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/rculist.h>  #include "ima.h"  #include "ima_template_lib.h" -static struct ima_template_desc defined_templates[] = { +static struct ima_template_desc builtin_templates[] = {  	{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},  	{.name = "ima-ng", .fmt = "d-ng|n-ng"},  	{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},  	{.name = "", .fmt = ""},	/* placeholder for a custom format */  }; +static LIST_HEAD(defined_templates); +static DEFINE_SPINLOCK(template_list); +  static struct ima_template_field supported_fields[] = {  	{.field_id = "d", .field_init = ima_eventdigest_init,  	 .field_show = ima_show_template_digest}, @@ -37,6 +41,7 @@ static struct ima_template_field supported_fields[] = {  	{.field_id = "sig", .field_init = ima_eventsig_init,  	 .field_show = ima_show_template_sig},  }; +#define MAX_TEMPLATE_NAME_LEN 15  static struct ima_template_desc *ima_template;  static struct ima_template_desc *lookup_template_desc(const char *name); @@ -52,6 +57,8 @@ static int __init ima_template_setup(char *str)  	if (ima_template)  		return 1; +	ima_init_template_list(); +  	/*  	 * Verify that a template with the supplied name exists.  	 * If not, use CONFIG_IMA_DEFAULT_TEMPLATE. @@ -80,7 +87,7 @@ __setup("ima_template=", ima_template_setup);  static int __init ima_template_fmt_setup(char *str)  { -	int num_templates = ARRAY_SIZE(defined_templates); +	int num_templates = ARRAY_SIZE(builtin_templates);  	if (ima_template)  		return 1; @@ -91,22 +98,28 @@ static int __init ima_template_fmt_setup(char *str)  		return 1;  	} -	defined_templates[num_templates - 1].fmt = str; -	ima_template = defined_templates + num_templates - 1; +	builtin_templates[num_templates - 1].fmt = str; +	ima_template = builtin_templates + num_templates - 1; +  	return 1;  }  __setup("ima_template_fmt=", ima_template_fmt_setup);  static struct ima_template_desc *lookup_template_desc(const char *name)  { -	int i; - -	for (i = 0; i < ARRAY_SIZE(defined_templates); i++) { -		if (strcmp(defined_templates[i].name, name) == 0) -			return defined_templates + i; +	struct ima_template_desc *template_desc; +	int found = 0; + +	rcu_read_lock(); +	list_for_each_entry_rcu(template_desc, &defined_templates, list) { +		if ((strcmp(template_desc->name, name) == 0) || +		    (strcmp(template_desc->fmt, name) == 0)) { +			found = 1; +			break; +		}  	} - -	return NULL; +	rcu_read_unlock(); +	return found ? 
template_desc : NULL;  }  static struct ima_template_field *lookup_template_field(const char *field_id) @@ -142,9 +155,14 @@ static int template_desc_init_fields(const char *template_fmt,  {  	const char *template_fmt_ptr;  	struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX]; -	int template_num_fields = template_fmt_size(template_fmt); +	int template_num_fields;  	int i, len; +	if (num_fields && *num_fields > 0) /* already initialized? */ +		return 0; + +	template_num_fields = template_fmt_size(template_fmt); +  	if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {  		pr_err("format string '%s' contains too many fields\n",  		       template_fmt); @@ -182,11 +200,28 @@ static int template_desc_init_fields(const char *template_fmt,  	return 0;  } +void ima_init_template_list(void) +{ +	int i; + +	if (!list_empty(&defined_templates)) +		return; + +	spin_lock(&template_list); +	for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) { +		list_add_tail_rcu(&builtin_templates[i].list, +				  &defined_templates); +	} +	spin_unlock(&template_list); +} +  struct ima_template_desc *ima_template_desc_current(void)  { -	if (!ima_template) +	if (!ima_template) { +		ima_init_template_list();  		ima_template =  		    lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE); +	}  	return ima_template;  } @@ -205,3 +240,239 @@ int __init ima_init_template(void)  	return result;  } + +static struct ima_template_desc *restore_template_fmt(char *template_name) +{ +	struct ima_template_desc *template_desc = NULL; +	int ret; + +	ret = template_desc_init_fields(template_name, NULL, NULL); +	if (ret < 0) { +		pr_err("attempting to initialize the template \"%s\" failed\n", +			template_name); +		goto out; +	} + +	template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL); +	if (!template_desc) +		goto out; + +	template_desc->name = ""; +	template_desc->fmt = kstrdup(template_name, GFP_KERNEL); +	if (!template_desc->fmt) +		goto out; + +	spin_lock(&template_list); +	list_add_tail_rcu(&template_desc->list, &defined_templates); +	spin_unlock(&template_list); +out: +	return template_desc; +} + +static int ima_restore_template_data(struct ima_template_desc *template_desc, +				     void *template_data, +				     int template_data_size, +				     struct ima_template_entry **entry) +{ +	struct binary_field_data { +		u32 len; +		u8 data[0]; +	} __packed; + +	struct binary_field_data *field_data; +	int offset = 0; +	int ret = 0; +	int i; + +	*entry = kzalloc(sizeof(**entry) + +		    template_desc->num_fields * sizeof(struct ima_field_data), +		    GFP_NOFS); +	if (!*entry) +		return -ENOMEM; + +	(*entry)->template_desc = template_desc; +	for (i = 0; i < template_desc->num_fields; i++) { +		field_data = template_data + offset; + +		/* Each field of the template data is prefixed with a length. 
*/ +		if (offset > (template_data_size - sizeof(*field_data))) { +			pr_err("Restoring the template field failed\n"); +			ret = -EINVAL; +			break; +		} +		offset += sizeof(*field_data); + +		if (ima_canonical_fmt) +			field_data->len = le32_to_cpu(field_data->len); + +		if (offset > (template_data_size - field_data->len)) { +			pr_err("Restoring the template field data failed\n"); +			ret = -EINVAL; +			break; +		} +		offset += field_data->len; + +		(*entry)->template_data[i].len = field_data->len; +		(*entry)->template_data_len += sizeof(field_data->len); + +		(*entry)->template_data[i].data = +			kzalloc(field_data->len + 1, GFP_KERNEL); +		if (!(*entry)->template_data[i].data) { +			ret = -ENOMEM; +			break; +		} +		memcpy((*entry)->template_data[i].data, field_data->data, +			field_data->len); +		(*entry)->template_data_len += field_data->len; +	} + +	if (ret < 0) { +		ima_free_template_entry(*entry); +		*entry = NULL; +	} + +	return ret; +} + +/* Restore the serialized binary measurement list without extending PCRs. */ +int ima_restore_measurement_list(loff_t size, void *buf) +{ +	struct binary_hdr_v1 { +		u32 pcr; +		u8 digest[TPM_DIGEST_SIZE]; +		u32 template_name_len; +		char template_name[0]; +	} __packed; +	char template_name[MAX_TEMPLATE_NAME_LEN]; + +	struct binary_data_v1 { +		u32 template_data_size; +		char template_data[0]; +	} __packed; + +	struct ima_kexec_hdr *khdr = buf; +	struct binary_hdr_v1 *hdr_v1; +	struct binary_data_v1 *data_v1; + +	void *bufp = buf + sizeof(*khdr); +	void *bufendp; +	struct ima_template_entry *entry; +	struct ima_template_desc *template_desc; +	unsigned long count = 0; +	int ret = 0; + +	if (!buf || size < sizeof(*khdr)) +		return 0; + +	if (ima_canonical_fmt) { +		khdr->version = le16_to_cpu(khdr->version); +		khdr->count = le64_to_cpu(khdr->count); +		khdr->buffer_size = le64_to_cpu(khdr->buffer_size); +	} + +	if (khdr->version != 1) { +		pr_err("attempting to restore a incompatible measurement list"); +		return -EINVAL; +	} + +	if (khdr->count > ULONG_MAX - 1) { +		pr_err("attempting to restore too many measurements"); +		return -EINVAL; +	} + +	/* +	 * ima kexec buffer prefix: version, buffer size, count +	 * v1 format: pcr, digest, template-name-len, template-name, +	 *	      template-data-size, template-data +	 */ +	bufendp = buf + khdr->buffer_size; +	while ((bufp < bufendp) && (count++ < khdr->count)) { +		hdr_v1 = bufp; +		if (bufp > (bufendp - sizeof(*hdr_v1))) { +			pr_err("attempting to restore partial measurement\n"); +			ret = -EINVAL; +			break; +		} +		bufp += sizeof(*hdr_v1); + +		if (ima_canonical_fmt) +			hdr_v1->template_name_len = +			    le32_to_cpu(hdr_v1->template_name_len); + +		if ((hdr_v1->template_name_len >= MAX_TEMPLATE_NAME_LEN) || +		    (bufp > (bufendp - hdr_v1->template_name_len))) { +			pr_err("attempting to restore a template name \ +				that is too long\n"); +			ret = -EINVAL; +			break; +		} +		data_v1 = bufp += (u_int8_t)hdr_v1->template_name_len; + +		/* template name is not null terminated */ +		memcpy(template_name, hdr_v1->template_name, +		       hdr_v1->template_name_len); +		template_name[hdr_v1->template_name_len] = 0; + +		if (strcmp(template_name, "ima") == 0) { +			pr_err("attempting to restore an unsupported \ +				template \"%s\" failed\n", template_name); +			ret = -EINVAL; +			break; +		} + +		template_desc = lookup_template_desc(template_name); +		if (!template_desc) { +			template_desc = restore_template_fmt(template_name); +			if (!template_desc) +				break; +		} + +		/* +		 * Only the 
running system's template format is initialized +		 * on boot.  As needed, initialize the other template formats. +		 */ +		ret = template_desc_init_fields(template_desc->fmt, +						&(template_desc->fields), +						&(template_desc->num_fields)); +		if (ret < 0) { +			pr_err("attempting to restore the template fmt \"%s\" \ +				failed\n", template_desc->fmt); +			ret = -EINVAL; +			break; +		} + +		if (bufp > (bufendp - sizeof(data_v1->template_data_size))) { +			pr_err("restoring the template data size failed\n"); +			ret = -EINVAL; +			break; +		} +		bufp += (u_int8_t) sizeof(data_v1->template_data_size); + +		if (ima_canonical_fmt) +			data_v1->template_data_size = +			    le32_to_cpu(data_v1->template_data_size); + +		if (bufp > (bufendp - data_v1->template_data_size)) { +			pr_err("restoring the template data failed\n"); +			ret = -EINVAL; +			break; +		} +		bufp += data_v1->template_data_size; + +		ret = ima_restore_template_data(template_desc, +						data_v1->template_data, +						data_v1->template_data_size, +						&entry); +		if (ret < 0) +			break; + +		memcpy(entry->digest, hdr_v1->digest, TPM_DIGEST_SIZE); +		entry->pcr = +		    !ima_canonical_fmt ? hdr_v1->pcr : le32_to_cpu(hdr_v1->pcr); +		ret = ima_restore_measurement_entry(entry); +		if (ret < 0) +			break; + +	} +	return ret; +} diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index f9bae04ba176..f9ba37b3928d 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -103,8 +103,11 @@ static void ima_show_template_data_binary(struct seq_file *m,  	u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?  	    strlen(field_data->data) : field_data->len; -	if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) -		ima_putc(m, &len, sizeof(len)); +	if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) { +		u32 field_len = !ima_canonical_fmt ? 
len : cpu_to_le32(len); + +		ima_putc(m, &field_len, sizeof(field_len)); +	}  	if (!len)  		return; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index d580ad06b792..04a764f71ec8 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -23,7 +23,7 @@  #include <linux/vmalloc.h>  #include <linux/security.h>  #include <linux/uio.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h>  #include "internal.h"  #define KEY_MAX_DESC_SIZE 4096 @@ -1074,7 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,  		}  		ret = -EFAULT; -		if (copy_from_iter(payload, plen, from) != plen) +		if (!copy_from_iter_full(payload, plen, from))  			goto error2;  	} diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 40a885239782..918cddcd4516 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -18,7 +18,7 @@  #include <linux/mutex.h>  #include <linux/security.h>  #include <linux/user_namespace.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h>  #include "internal.h"  /* Session keyring create vs join semaphore */ diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 9db8b4a82787..6bbe2f535f08 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -16,7 +16,7 @@  #include <linux/err.h>  #include <linux/seq_file.h>  #include <linux/slab.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h>  #include "internal.h"  #include <keys/user-type.h> diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 66b1840b4110..e187c8909d9d 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -15,7 +15,7 @@  #include <linux/seq_file.h>  #include <linux/err.h>  #include <keys/user-type.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h>  #include "internal.h"  static int logon_vet_description(const char *desc); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 38b79d797aaf..d98550abe16d 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -231,12 +231,13 @@ static int inode_alloc_security(struct inode *inode)  	if (!isec)  		return -ENOMEM; -	mutex_init(&isec->lock); +	spin_lock_init(&isec->lock);  	INIT_LIST_HEAD(&isec->list);  	isec->inode = inode;  	isec->sid = SECINITSID_UNLABELED;  	isec->sclass = SECCLASS_FILE;  	isec->task_sid = sid; +	isec->initialized = LABEL_INVALID;  	inode->i_security = isec;  	return 0; @@ -247,7 +248,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  /*   * Try reloading inode security labels that have been marked as invalid.  The   * @may_sleep parameter indicates when sleeping and thus reloading labels is - * allowed; when set to false, returns ERR_PTR(-ECHILD) when the label is + * allowed; when set to false, returns -ECHILD when the label is   * invalid.  The @opt_dentry parameter should be set to a dentry of the inode;   * when no dentry is available, set it to NULL instead.   
*/ @@ -1100,11 +1101,12 @@ static int selinux_parse_opts_str(char *options,  	}  	rc = -ENOMEM; -	opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_ATOMIC); +	opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_KERNEL);  	if (!opts->mnt_opts)  		goto out_err; -	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC); +	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), +				       GFP_KERNEL);  	if (!opts->mnt_opts_flags) {  		kfree(opts->mnt_opts);  		goto out_err; @@ -1380,7 +1382,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  {  	struct superblock_security_struct *sbsec = NULL;  	struct inode_security_struct *isec = inode->i_security; -	u32 sid; +	u32 task_sid, sid = 0; +	u16 sclass;  	struct dentry *dentry;  #define INITCONTEXTLEN 255  	char *context = NULL; @@ -1388,12 +1391,15 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  	int rc = 0;  	if (isec->initialized == LABEL_INITIALIZED) -		goto out; +		return 0; -	mutex_lock(&isec->lock); +	spin_lock(&isec->lock);  	if (isec->initialized == LABEL_INITIALIZED)  		goto out_unlock; +	if (isec->sclass == SECCLASS_FILE) +		isec->sclass = inode_mode_to_security_class(inode->i_mode); +  	sbsec = inode->i_sb->s_security;  	if (!(sbsec->flags & SE_SBINITIALIZED)) {  		/* Defer initialization until selinux_complete_init, @@ -1406,12 +1412,18 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  		goto out_unlock;  	} +	sclass = isec->sclass; +	task_sid = isec->task_sid; +	sid = isec->sid; +	isec->initialized = LABEL_PENDING; +	spin_unlock(&isec->lock); +  	switch (sbsec->behavior) {  	case SECURITY_FS_USE_NATIVE:  		break;  	case SECURITY_FS_USE_XATTR:  		if (!(inode->i_opflags & IOP_XATTR)) { -			isec->sid = sbsec->def_sid; +			sid = sbsec->def_sid;  			break;  		}  		/* Need a dentry, since the xattr API requires one. @@ -1433,7 +1445,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  			 * inode_doinit with a dentry, before these inodes could  			 * be used again by userspace.  			 
*/ -			goto out_unlock; +			goto out;  		}  		len = INITCONTEXTLEN; @@ -1441,7 +1453,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  		if (!context) {  			rc = -ENOMEM;  			dput(dentry); -			goto out_unlock; +			goto out;  		}  		context[len] = '\0';  		rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len); @@ -1452,14 +1464,14 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  			rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, NULL, 0);  			if (rc < 0) {  				dput(dentry); -				goto out_unlock; +				goto out;  			}  			len = rc;  			context = kmalloc(len+1, GFP_NOFS);  			if (!context) {  				rc = -ENOMEM;  				dput(dentry); -				goto out_unlock; +				goto out;  			}  			context[len] = '\0';  			rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len); @@ -1471,7 +1483,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  				       "%d for dev=%s ino=%ld\n", __func__,  				       -rc, inode->i_sb->s_id, inode->i_ino);  				kfree(context); -				goto out_unlock; +				goto out;  			}  			/* Map ENODATA to the default file SID */  			sid = sbsec->def_sid; @@ -1501,29 +1513,25 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  			}  		}  		kfree(context); -		isec->sid = sid;  		break;  	case SECURITY_FS_USE_TASK: -		isec->sid = isec->task_sid; +		sid = task_sid;  		break;  	case SECURITY_FS_USE_TRANS:  		/* Default to the fs SID. */ -		isec->sid = sbsec->sid; +		sid = sbsec->sid;  		/* Try to obtain a transition SID. */ -		isec->sclass = inode_mode_to_security_class(inode->i_mode); -		rc = security_transition_sid(isec->task_sid, sbsec->sid, -					     isec->sclass, NULL, &sid); +		rc = security_transition_sid(task_sid, sid, sclass, NULL, &sid);  		if (rc) -			goto out_unlock; -		isec->sid = sid; +			goto out;  		break;  	case SECURITY_FS_USE_MNTPOINT: -		isec->sid = sbsec->mntpoint_sid; +		sid = sbsec->mntpoint_sid;  		break;  	default:  		/* Default to the fs superblock SID. */ -		isec->sid = sbsec->sid; +		sid = sbsec->sid;  		if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {  			/* We must have a dentry to determine the label on @@ -1546,25 +1554,30 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent  			 * could be used again by userspace.  			 
*/  			if (!dentry) -				goto out_unlock; -			isec->sclass = inode_mode_to_security_class(inode->i_mode); -			rc = selinux_genfs_get_sid(dentry, isec->sclass, +				goto out; +			rc = selinux_genfs_get_sid(dentry, sclass,  						   sbsec->flags, &sid);  			dput(dentry);  			if (rc) -				goto out_unlock; -			isec->sid = sid; +				goto out;  		}  		break;  	} -	isec->initialized = LABEL_INITIALIZED; +out: +	spin_lock(&isec->lock); +	if (isec->initialized == LABEL_PENDING) { +		if (!sid || rc) { +			isec->initialized = LABEL_INVALID; +			goto out_unlock; +		} + +		isec->initialized = LABEL_INITIALIZED; +		isec->sid = sid; +	}  out_unlock: -	mutex_unlock(&isec->lock); -out: -	if (isec->sclass == SECCLASS_FILE) -		isec->sclass = inode_mode_to_security_class(inode->i_mode); +	spin_unlock(&isec->lock);  	return rc;  } @@ -3198,9 +3211,11 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,  	}  	isec = backing_inode_security(dentry); +	spin_lock(&isec->lock);  	isec->sclass = inode_mode_to_security_class(inode->i_mode);  	isec->sid = newsid;  	isec->initialized = LABEL_INITIALIZED; +	spin_unlock(&isec->lock);  	return;  } @@ -3293,9 +3308,11 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,  	if (rc)  		return rc; +	spin_lock(&isec->lock);  	isec->sclass = inode_mode_to_security_class(inode->i_mode);  	isec->sid = newsid;  	isec->initialized = LABEL_INITIALIZED; +	spin_unlock(&isec->lock);  	return 0;  } @@ -3956,8 +3973,11 @@ static void selinux_task_to_inode(struct task_struct *p,  	struct inode_security_struct *isec = inode->i_security;  	u32 sid = task_sid(p); +	spin_lock(&isec->lock); +	isec->sclass = inode_mode_to_security_class(inode->i_mode);  	isec->sid = sid;  	isec->initialized = LABEL_INITIALIZED; +	spin_unlock(&isec->lock);  }  /* Returns error only if unable to parse addresses */ @@ -4276,24 +4296,24 @@ static int selinux_socket_post_create(struct socket *sock, int family,  	const struct task_security_struct *tsec = current_security();  	struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(sock));  	struct sk_security_struct *sksec; +	u16 sclass = socket_type_to_security_class(family, type, protocol); +	u32 sid = SECINITSID_KERNEL;  	int err = 0; -	isec->sclass = socket_type_to_security_class(family, type, protocol); - -	if (kern) -		isec->sid = SECINITSID_KERNEL; -	else { -		err = socket_sockcreate_sid(tsec, isec->sclass, &(isec->sid)); +	if (!kern) { +		err = socket_sockcreate_sid(tsec, sclass, &sid);  		if (err)  			return err;  	} +	isec->sclass = sclass; +	isec->sid = sid;  	isec->initialized = LABEL_INITIALIZED;  	if (sock->sk) {  		sksec = sock->sk->sk_security; -		sksec->sid = isec->sid; -		sksec->sclass = isec->sclass; +		sksec->sclass = sclass; +		sksec->sid = sid;  		err = selinux_netlbl_socket_post_create(sock->sk, family);  	} @@ -4469,16 +4489,22 @@ static int selinux_socket_accept(struct socket *sock, struct socket *newsock)  	int err;  	struct inode_security_struct *isec;  	struct inode_security_struct *newisec; +	u16 sclass; +	u32 sid;  	err = sock_has_perm(current, sock->sk, SOCKET__ACCEPT);  	if (err)  		return err; -	newisec = inode_security_novalidate(SOCK_INODE(newsock)); -  	isec = inode_security_novalidate(SOCK_INODE(sock)); -	newisec->sclass = isec->sclass; -	newisec->sid = isec->sid; +	spin_lock(&isec->lock); +	sclass = isec->sclass; +	sid = isec->sid; +	spin_unlock(&isec->lock); + +	newisec = inode_security_novalidate(SOCK_INODE(newsock)); +	newisec->sclass = sclass; +	
newisec->sid = sid;  	newisec->initialized = LABEL_INITIALIZED;  	return 0; @@ -5861,7 +5887,7 @@ static int selinux_setprocattr(struct task_struct *p,  		return error;  	/* Obtain a SID for the context, if one was specified. */ -	if (size && str[1] && str[1] != '\n') { +	if (size && str[0] && str[0] != '\n') {  		if (str[size-1] == '\n') {  			str[size-1] = 0;  			size--; @@ -5981,9 +6007,9 @@ static void selinux_inode_invalidate_secctx(struct inode *inode)  {  	struct inode_security_struct *isec = inode->i_security; -	mutex_lock(&isec->lock); +	spin_lock(&isec->lock);  	isec->initialized = LABEL_INVALID; -	mutex_unlock(&isec->lock); +	spin_unlock(&isec->lock);  }  /* diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 1f1f4b2f6018..13ae49b0baa0 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -1,3 +1,5 @@ +#include <linux/capability.h> +  #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \      "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append" @@ -24,6 +26,10 @@  #define COMMON_CAP2_PERMS  "mac_override", "mac_admin", "syslog", \  		"wake_alarm", "block_suspend", "audit_read" +#if CAP_LAST_CAP > CAP_AUDIT_READ +#error New capability defined, please update COMMON_CAP2_PERMS. +#endif +  /*   * Note: The name for any socket class should be suffixed by "socket",   *	 and doesn't contain more than one substr of "socket". diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index c21e135460a5..e8dab0f02c72 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -39,7 +39,8 @@ struct task_security_struct {  enum label_initialized {  	LABEL_INVALID,		/* invalid or not initialized */ -	LABEL_INITIALIZED	/* initialized */ +	LABEL_INITIALIZED,	/* initialized */ +	LABEL_PENDING  };  struct inode_security_struct { @@ -52,7 +53,7 @@ struct inode_security_struct {  	u32 sid;		/* SID of this object */  	u16 sclass;		/* security class of this object */  	unsigned char initialized;	/* initialization flag */ -	struct mutex lock; +	spinlock_t lock;  };  struct file_security_struct { diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 72c145dd799f..cf9293e01fc1 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -163,6 +163,8 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,  	if (sscanf(page, "%d", &new_value) != 1)  		goto out; +	new_value = !!new_value; +  	if (new_value != selinux_enforcing) {  		length = task_has_security(current, SECURITY__SETENFORCE);  		if (length) @@ -1301,7 +1303,7 @@ static int sel_make_bools(void)  			goto out;  		isec->sid = sid; -		isec->initialized = 1; +		isec->initialized = LABEL_INITIALIZED;  		inode->i_fop = &sel_bool_ops;  		inode->i_ino = i|SEL_BOOL_INO_OFFSET;  		d_add(dentry, inode); @@ -1834,7 +1836,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)  	isec = (struct inode_security_struct *)inode->i_security;  	isec->sid = SECINITSID_DEVNULL;  	isec->sclass = SECCLASS_CHR_FILE; -	isec->initialized = 1; +	isec->initialized = LABEL_INITIALIZED;  	init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));  	d_add(dentry, inode); diff --git a/security/smack/smack.h b/security/smack/smack.h index 51fd30192c08..77abe2efacae 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -336,7 +336,6 @@ extern int smack_ptrace_rule;  extern struct smack_known 
smack_known_floor;  extern struct smack_known smack_known_hat;  extern struct smack_known smack_known_huh; -extern struct smack_known smack_known_invalid;  extern struct smack_known smack_known_star;  extern struct smack_known smack_known_web; diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index 23e5808a0970..356e3764cad9 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -36,11 +36,6 @@ struct smack_known smack_known_floor = {  	.smk_secid	= 5,  }; -struct smack_known smack_known_invalid = { -	.smk_known	= "", -	.smk_secid	= 6, -}; -  struct smack_known smack_known_web = {  	.smk_known	= "@",  	.smk_secid	= 7, @@ -615,7 +610,7 @@ struct smack_known *smack_from_secid(const u32 secid)  	 * of a secid that is not on the list.  	 */  	rcu_read_unlock(); -	return &smack_known_invalid; +	return &smack_known_huh;  }  /* diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 1cb060293505..94dc9d406ce3 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -225,7 +225,7 @@ static int smk_bu_credfile(const struct cred *cred, struct file *file,  {  	struct task_smack *tsp = cred->security;  	struct smack_known *sskp = tsp->smk_task; -	struct inode *inode = file->f_inode; +	struct inode *inode = file_inode(file);  	struct inode_smack *isp = inode->i_security;  	char acc[SMK_NUM_ACCESS_TYPE + 1]; @@ -692,12 +692,12 @@ static int smack_parse_opts_str(char *options,  		}  	} -	opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_ATOMIC); +	opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_KERNEL);  	if (!opts->mnt_opts)  		goto out_err;  	opts->mnt_opts_flags = kcalloc(NUM_SMK_MNT_OPTS, sizeof(int), -			GFP_ATOMIC); +			GFP_KERNEL);  	if (!opts->mnt_opts_flags) {  		kfree(opts->mnt_opts);  		goto out_err; @@ -769,6 +769,31 @@ static int smack_set_mnt_opts(struct super_block *sb,  	if (sp->smk_flags & SMK_SB_INITIALIZED)  		return 0; +	if (!smack_privileged(CAP_MAC_ADMIN)) { +		/* +		 * Unprivileged mounts don't get to specify Smack values. +		 */ +		if (num_opts) +			return -EPERM; +		/* +		 * Unprivileged mounts get root and default from the caller. +		 */ +		skp = smk_of_current(); +		sp->smk_root = skp; +		sp->smk_default = skp; +		/* +		 * For a handful of fs types with no user-controlled +		 * backing store it's okay to trust security labels +		 * in the filesystem. The rest are untrusted. +		 */ +		if (sb->s_user_ns != &init_user_ns && +		    sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC && +		    sb->s_magic != RAMFS_MAGIC) { +			transmute = 1; +			sp->smk_flags |= SMK_SB_UNTRUSTED; +		} +	} +  	sp->smk_flags |= SMK_SB_INITIALIZED;  	for (i = 0; i < num_opts; i++) { @@ -809,31 +834,6 @@ static int smack_set_mnt_opts(struct super_block *sb,  		}  	} -	if (!smack_privileged(CAP_MAC_ADMIN)) { -		/* -		 * Unprivileged mounts don't get to specify Smack values. -		 */ -		if (num_opts) -			return -EPERM; -		/* -		 * Unprivileged mounts get root and default from the caller. -		 */ -		skp = smk_of_current(); -		sp->smk_root = skp; -		sp->smk_default = skp; -		/* -		 * For a handful of fs types with no user-controlled -		 * backing store it's okay to trust security labels -		 * in the filesystem. The rest are untrusted. -		 */ -		if (sb->s_user_ns != &init_user_ns && -		    sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC && -		    sb->s_magic != RAMFS_MAGIC) { -			transmute = 1; -			sp->smk_flags |= SMK_SB_UNTRUSTED; -		} -	} -  	/*  	 * Initialize the root inode.  	
 */ @@ -1384,20 +1384,14 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,  		skp = smk_import_entry(value, size);  		if (!IS_ERR(skp))  			isp->smk_inode = skp; -		else -			isp->smk_inode = &smack_known_invalid;  	} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {  		skp = smk_import_entry(value, size);  		if (!IS_ERR(skp))  			isp->smk_task = skp; -		else -			isp->smk_task = &smack_known_invalid;  	} else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {  		skp = smk_import_entry(value, size);  		if (!IS_ERR(skp))  			isp->smk_mmap = skp; -		else -			isp->smk_mmap = &smack_known_invalid;  	}  	return; @@ -2023,6 +2017,8 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,  	if (new_tsp == NULL)  		return -ENOMEM; +	new->security = new_tsp; +  	rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);  	if (rc != 0)  		return rc; @@ -2032,7 +2028,6 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,  	if (rc != 0)  		return rc; -	new->security = new_tsp;  	return 0;  } @@ -2067,12 +2062,8 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)  static int smack_kernel_act_as(struct cred *new, u32 secid)  {  	struct task_smack *new_tsp = new->security; -	struct smack_known *skp = smack_from_secid(secid); -	if (skp == NULL) -		return -EINVAL; - -	new_tsp->smk_task = skp; +	new_tsp->smk_task = smack_from_secid(secid);  	return 0;  } @@ -2337,8 +2328,16 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)  	if (ssp == NULL)  		return -ENOMEM; -	ssp->smk_in = skp; -	ssp->smk_out = skp; +	/* +	 * Sockets created by kernel threads receive web label. +	 */ +	if (unlikely(current->flags & PF_KTHREAD)) { +		ssp->smk_in = &smack_known_web; +		ssp->smk_out = &smack_known_web; +	} else { +		ssp->smk_in = skp; +		ssp->smk_out = skp; +	}  	ssp->smk_packet = NULL;  	sk->sk_security = ssp; @@ -2435,17 +2434,17 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)  	list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {  		/* +		 * If the label is NULL the entry has +		 * been renounced. Ignore it. +		 */ +		if (snp->smk_label == NULL) +			continue; +		/*  		* we break after finding the first match because  		* the list is sorted from longest to shortest mask  		* so we have found the most specific match  		*/  		for (found = 1, i = 0; i < 8; i++) { -			/* -			 * If the label is NULL the entry has -			 * been renounced. Ignore it. -			 */ -			if (snp->smk_label == NULL) -				continue;  			if ((sap->s6_addr16[i] & snp->smk_mask.s6_addr16[i]) !=  			    snp->smk_host.s6_addr16[i]) {  				found = 0; @@ -3661,10 +3660,11 @@ static int smack_setprocattr(struct task_struct *p, char *name,  		return PTR_ERR(skp);  	/* -	 * No process is ever allowed the web ("@") label. +	 * No process is ever allowed the web ("@") label +	 * and the star ("*") label.  	 */ -	if (skp == &smack_known_web) -		return -EPERM; +	if (skp == &smack_known_web || skp == &smack_known_star) +		return -EINVAL;  	if (!smack_privileged(CAP_MAC_ADMIN)) {  		rc = -EPERM; @@ -3884,21 +3884,11 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,  			return &smack_known_web;  		return &smack_known_star;  	} -	if ((sap->flags & NETLBL_SECATTR_SECID) != 0) { +	if ((sap->flags & NETLBL_SECATTR_SECID) != 0)  		/*  		 * Looks like a fallback, which gives us a secid.  		 
*/ -		skp = smack_from_secid(sap->attr.secid); -		/* -		 * This has got to be a bug because it is -		 * impossible to specify a fallback without -		 * specifying the label, which will ensure -		 * it has a secid, and the only way to get a -		 * secid is from a fallback. -		 */ -		BUG_ON(skp == NULL); -		return skp; -	} +		return smack_from_secid(sap->attr.secid);  	/*  	 * Without guidance regarding the smack value  	 * for the packet fall back on the network @@ -4761,7 +4751,6 @@ static __init void init_smack_known_list(void)  	mutex_init(&smack_known_hat.smk_rules_lock);  	mutex_init(&smack_known_floor.smk_rules_lock);  	mutex_init(&smack_known_star.smk_rules_lock); -	mutex_init(&smack_known_invalid.smk_rules_lock);  	mutex_init(&smack_known_web.smk_rules_lock);  	/*  	 * Initialize rule lists @@ -4770,7 +4759,6 @@ static __init void init_smack_known_list(void)  	INIT_LIST_HEAD(&smack_known_hat.smk_rules);  	INIT_LIST_HEAD(&smack_known_star.smk_rules);  	INIT_LIST_HEAD(&smack_known_floor.smk_rules); -	INIT_LIST_HEAD(&smack_known_invalid.smk_rules);  	INIT_LIST_HEAD(&smack_known_web.smk_rules);  	/*  	 * Create the known labels list @@ -4779,7 +4767,6 @@ static __init void init_smack_known_list(void)  	smk_insert_entry(&smack_known_hat);  	smk_insert_entry(&smack_known_star);  	smk_insert_entry(&smack_known_floor); -	smk_insert_entry(&smack_known_invalid);  	smk_insert_entry(&smack_known_web);  } diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 6492fe96cae4..13743a01b35b 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -2998,9 +2998,6 @@ static int __init init_smk_fs(void)  	rc = smk_preset_netlabel(&smack_known_huh);  	if (err == 0 && rc < 0)  		err = rc; -	rc = smk_preset_netlabel(&smack_known_invalid); -	if (err == 0 && rc < 0) -		err = rc;  	rc = smk_preset_netlabel(&smack_known_star);  	if (err == 0 && rc < 0)  		err = rc; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 682b73af7766..838ffa78cfda 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,  	 * the execve().  	 */  	if (get_user_pages_remote(current, bprm->mm, pos, 1, -				FOLL_FORCE, &page, NULL) <= 0) +				FOLL_FORCE, &page, NULL, NULL) <= 0)  		return false;  #else  	page = bprm->page[pos / PAGE_SIZE]; diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 0309f2111c70..968e5e0a3f81 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -309,7 +309,7 @@ static int task_is_descendant(struct task_struct *parent,   * @tracer: the task_struct of the process attempting ptrace   * @tracee: the task_struct of the process to be ptraced   * - * Returns 1 if tracer has is ptracer exception ancestor for tracee. + * Returns 1 if tracer has a ptracer exception ancestor for tracee.   */  static int ptracer_exception_found(struct task_struct *tracer,  				   struct task_struct *tracee) @@ -320,6 +320,18 @@ static int ptracer_exception_found(struct task_struct *tracer,  	bool found = false;  	rcu_read_lock(); + +	/* +	 * If there's already an active tracing relationship, then make an +	 * exception for the sake of other accesses, like process_vm_rw(). +	 */ +	parent = ptrace_parent(tracee); +	if (parent != NULL && same_thread_group(parent, tracer)) { +		rc = 1; +		goto unlock; +	} + +	/* Look for a PR_SET_PTRACER relationship. 
*/  	if (!thread_group_leader(tracee))  		tracee = rcu_dereference(tracee->group_leader);  	list_for_each_entry_rcu(relation, &ptracer_relations, node) { @@ -334,6 +346,8 @@ static int ptracer_exception_found(struct task_struct *tracer,  	if (found && (parent == NULL || task_is_descendant(parent, tracer)))  		rc = 1; + +unlock:  	rcu_read_unlock();  	return rc;
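
A note on the serialized measurement list introduced above: ima_dump_measurement_list() writes a struct ima_kexec_hdr followed by one v1 record per measurement (pcr, template digest, template-name length and name, template-data size and data), with the length fields stored little-endian when the kernel is booted with "ima_canonical_fmt". The stand-alone C sketch below walks such a buffer. It is illustrative only, not part of the patch: it assumes the canonical little-endian layout, a little-endian host (so no byte swapping is shown), a SHA-1 sized template digest, and a hypothetical dump file passed on the command line.

/*
 * Minimal, hypothetical parser for an IMA binary measurement list dump,
 * mirroring struct ima_kexec_hdr and the v1 record layout from the patch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TPM_DIGEST_SIZE 20		/* SHA-1 sized template digest */

struct ima_kexec_hdr {			/* mirrors security/integrity/ima/ima.h */
	uint16_t version;
	uint16_t _reserved0;
	uint32_t _reserved1;
	uint64_t buffer_size;
	uint64_t count;
} __attribute__((packed));

static int walk_measurement_list(const uint8_t *buf, size_t size)
{
	const struct ima_kexec_hdr *khdr = (const void *)buf;
	const uint8_t *p, *end;
	uint64_t i;

	if (size < sizeof(*khdr) || khdr->version != 1 ||
	    khdr->buffer_size < sizeof(*khdr) || khdr->buffer_size > size)
		return -1;

	p = buf + sizeof(*khdr);
	end = buf + khdr->buffer_size;

	for (i = 0; i < khdr->count && p < end; i++) {
		uint32_t pcr, name_len, data_len;
		char name[16] = { 0 };	/* > MAX_TEMPLATE_NAME_LEN, so it fits */

		/* fixed part: pcr, template digest, template-name length */
		if ((size_t)(end - p) < sizeof(pcr) + TPM_DIGEST_SIZE + sizeof(name_len))
			return -1;
		memcpy(&pcr, p, sizeof(pcr));
		p += sizeof(pcr) + TPM_DIGEST_SIZE;	/* skip the digest bytes */
		memcpy(&name_len, p, sizeof(name_len));
		p += sizeof(name_len);

		/* template name is not NUL terminated in the buffer */
		if (name_len >= sizeof(name) || (size_t)(end - p) < name_len)
			return -1;
		memcpy(name, p, name_len);
		p += name_len;

		/*
		 * template data size and data; the "ima" template omits this
		 * field and is rejected by ima_restore_measurement_list().
		 */
		if ((size_t)(end - p) < sizeof(data_len))
			return -1;
		memcpy(&data_len, p, sizeof(data_len));
		p += sizeof(data_len);
		if ((size_t)(end - p) < data_len)
			return -1;
		p += data_len;			/* field contents not decoded here */

		printf("pcr=%" PRIu32 " template=%s template_data=%" PRIu32 " bytes\n",
		       pcr, name, data_len);
	}
	return 0;
}

int main(int argc, char **argv)
{
	static uint8_t buf[1 << 20];	/* plenty for a demo list */
	FILE *f;
	size_t n;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	return walk_measurement_list(buf, n) ? 1 : 0;
}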