From: Tushar Sugandhi <tusharsu@linux.microsoft.com> To: zohar@linux.ibm.com, roberto.sassu@huaweicloud.com, roberto.sassu@huawei.com, eric.snowberg@oracle.com, stefanb@linux.ibm.com, ebiederm@xmission.com, noodles@fb.com, bauermann@kolabnow.com, linux-integrity@vger.kernel.org, kexec@lists.infradead.org Cc: code@tyhicks.com, nramas@linux.microsoft.com, paul@paul-moore.com Subject: [PATCH v3 3/7] ima: kexec: map IMA buffer source pages to image after kexec load Date: Fri, 15 Dec 2023 17:07:25 -0800 [thread overview] Message-ID: <20231216010729.2904751-4-tusharsu@linux.microsoft.com> (raw) In-Reply-To: <20231216010729.2904751-1-tusharsu@linux.microsoft.com> Implement kimage_map_segment() to enable mapping of IMA buffer source pages to the kimage structure post kexec 'load'. This function, accepting a kimage pointer, an address, and a size, will gather the source pages within the specified address range, create an array of page pointers, and map these to a contiguous virtual address range. The function returns the start of this range if successful, or NULL if unsuccessful. Additionally, introduce kimage_unmap_segment() for unmapping segments using vunmap(). Introduce ima_kexec_post_load(), to be invoked by IMA following the kexec 'load' of the new Kernel image. This function will map the IMA buffer, allocated during kexec 'load', to a segment in the loaded image. Lastly, relocate the for_each_kimage_entry() macro from kexec_core.c to kexec.h for broader accessibility. 
Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com> --- include/linux/ima.h | 3 ++ include/linux/kexec.h | 13 +++++++ kernel/kexec_core.c | 59 +++++++++++++++++++++++++++--- security/integrity/ima/ima_kexec.c | 32 ++++++++++++++++ 4 files changed, 102 insertions(+), 5 deletions(-) diff --git a/include/linux/ima.h b/include/linux/ima.h index 86b57757c7b1..006db20f852d 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -49,6 +49,9 @@ static inline void ima_appraise_parse_cmdline(void) {} #ifdef CONFIG_IMA_KEXEC extern void ima_add_kexec_buffer(struct kimage *image); +extern void ima_kexec_post_load(struct kimage *image); +#else +static inline void ima_kexec_post_load(struct kimage *image) {} #endif #else diff --git a/include/linux/kexec.h b/include/linux/kexec.h index fd94404acc66..eb98aca7f4c7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -493,6 +493,15 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { } #endif +#define for_each_kimage_entry(image, ptr, entry) \ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ + ptr = (entry & IND_INDIRECTION) ? 
\ + boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1) + +extern void *kimage_map_segment(struct kimage *image, + unsigned long addr, unsigned long size); +extern void kimage_unmap_segment(void *buffer); + #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; struct task_struct; @@ -500,6 +509,10 @@ static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } static inline int kexec_crash_loaded(void) { return 0; } +static inline void *kimage_map_segment(struct kimage *image, + unsigned long addr, unsigned long size) +{ return NULL; } +static inline void kimage_unmap_segment(void *buffer) { } #define kexec_in_progress false #endif /* CONFIG_KEXEC_CORE */ diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 3d578c6fefee..26978ad02676 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -594,11 +594,6 @@ void kimage_terminate(struct kimage *image) *image->entry = IND_DONE; } -#define for_each_kimage_entry(image, ptr, entry) \ - for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ - ptr = (entry & IND_INDIRECTION) ? \ - boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1) - static void kimage_free_entry(kimage_entry_t entry) { struct page *page; @@ -921,6 +916,60 @@ int kimage_load_segment(struct kimage *image, return result; } +void *kimage_map_segment(struct kimage *image, + unsigned long addr, unsigned long size) +{ + unsigned long eaddr = addr + size; + unsigned long src_page_addr, dest_page_addr; + unsigned int npages; + struct page **src_pages; + int i; + kimage_entry_t *ptr, entry; + void *vaddr = NULL; + + /* + * Collect the source pages and map them in a contiguous VA range. 
+ */ + npages = PFN_UP(eaddr) - PFN_DOWN(addr); + src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL); + if (!src_pages) { + pr_err("%s: Could not allocate ima pages array.\n", __func__); + return NULL; + } + + i = 0; + for_each_kimage_entry(image, ptr, entry) { + if (entry & IND_DESTINATION) + dest_page_addr = entry & PAGE_MASK; + else if (entry & IND_SOURCE) { + if (dest_page_addr >= addr && dest_page_addr < eaddr) { + src_page_addr = entry & PAGE_MASK; + src_pages[i++] = + virt_to_page(__va(src_page_addr)); + if (i == npages) + break; + dest_page_addr += PAGE_SIZE; + } + } + } + + /* Sanity check. */ + WARN_ON(i < npages); + + vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL); + kfree(src_pages); + + if (!vaddr) + pr_err("%s: Could not map IMA buffer.\n", __func__); + + return vaddr; +} + +void kimage_unmap_segment(void *segment_buffer) +{ + vunmap(segment_buffer); +} + struct kexec_load_limit { /* Mutex protects the limit count. */ struct mutex mutex; diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 0063d5e7b634..55bd5362262e 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c @@ -12,12 +12,15 @@ #include <linux/kexec.h> #include <linux/of.h> #include <linux/ima.h> +#include <linux/reboot.h> +#include <asm/page.h> #include "ima.h" #ifdef CONFIG_IMA_KEXEC struct seq_file ima_kexec_file; static void *ima_kexec_buffer; static size_t kexec_segment_size; +static bool ima_kexec_update_registered; void ima_free_kexec_file_buf(struct seq_file *sf) { @@ -201,6 +204,7 @@ static int ima_update_kexec_buffer(struct notifier_block *self, } memcpy(ima_kexec_buffer, buf, buf_size); out: + kimage_unmap_segment(ima_kexec_buffer); ima_kexec_buffer = NULL; if (resume) @@ -213,6 +217,34 @@ struct notifier_block update_buffer_nb = { .notifier_call = ima_update_kexec_buffer, }; +/* + * Create a mapping for the source pages that contain the IMA buffer + * so we can update it later. 
+ */ +void ima_kexec_post_load(struct kimage *image) +{ + if (ima_kexec_buffer) { + kimage_unmap_segment(ima_kexec_buffer); + ima_kexec_buffer = NULL; + } + + if (!image->ima_buffer_addr) + return; + + ima_kexec_buffer = kimage_map_segment(image, + image->ima_buffer_addr, + image->ima_buffer_size); + if (!ima_kexec_buffer) { + pr_err("%s: Could not map measurements buffer.\n", __func__); + return; + } + + if (!ima_kexec_update_registered) { + register_reboot_notifier(&update_buffer_nb); + ima_kexec_update_registered = true; + } +} + #endif /* IMA_KEXEC */ /* -- 2.25.1 _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec
WARNING: multiple messages have this Message-ID (diff)
From: Tushar Sugandhi <tusharsu@linux.microsoft.com> To: zohar@linux.ibm.com, roberto.sassu@huaweicloud.com, roberto.sassu@huawei.com, eric.snowberg@oracle.com, stefanb@linux.ibm.com, ebiederm@xmission.com, noodles@fb.com, bauermann@kolabnow.com, linux-integrity@vger.kernel.org, kexec@lists.infradead.org Cc: code@tyhicks.com, nramas@linux.microsoft.com, paul@paul-moore.com Subject: [PATCH v3 3/7] ima: kexec: map IMA buffer source pages to image after kexec load Date: Fri, 15 Dec 2023 17:07:25 -0800 [thread overview] Message-ID: <20231216010729.2904751-4-tusharsu@linux.microsoft.com> (raw) In-Reply-To: <20231216010729.2904751-1-tusharsu@linux.microsoft.com> Implement kimage_map_segment() to enable mapping of IMA buffer source pages to the kimage structure post kexec 'load'. This function, accepting a kimage pointer, an address, and a size, will gather the source pages within the specified address range, create an array of page pointers, and map these to a contiguous virtual address range. The function returns the start of this range if successful, or NULL if unsuccessful. Additionally, introduce kimage_unmap_segment() for unmapping segments using vunmap(). Introduce ima_kexec_post_load(), to be invoked by IMA following the kexec 'load' of the new Kernel image. This function will map the IMA buffer, allocated during kexec 'load', to a segment in the loaded image. Lastly, relocate the for_each_kimage_entry() macro from kexec_core.c to kexec.h for broader accessibility. 
Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com> --- include/linux/ima.h | 3 ++ include/linux/kexec.h | 13 +++++++ kernel/kexec_core.c | 59 +++++++++++++++++++++++++++--- security/integrity/ima/ima_kexec.c | 32 ++++++++++++++++ 4 files changed, 102 insertions(+), 5 deletions(-) diff --git a/include/linux/ima.h b/include/linux/ima.h index 86b57757c7b1..006db20f852d 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -49,6 +49,9 @@ static inline void ima_appraise_parse_cmdline(void) {} #ifdef CONFIG_IMA_KEXEC extern void ima_add_kexec_buffer(struct kimage *image); +extern void ima_kexec_post_load(struct kimage *image); +#else +static inline void ima_kexec_post_load(struct kimage *image) {} #endif #else diff --git a/include/linux/kexec.h b/include/linux/kexec.h index fd94404acc66..eb98aca7f4c7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -493,6 +493,15 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { } #endif +#define for_each_kimage_entry(image, ptr, entry) \ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ + ptr = (entry & IND_INDIRECTION) ? 
\ + boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1) + +extern void *kimage_map_segment(struct kimage *image, + unsigned long addr, unsigned long size); +extern void kimage_unmap_segment(void *buffer); + #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; struct task_struct; @@ -500,6 +509,10 @@ static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } static inline int kexec_crash_loaded(void) { return 0; } +static inline void *kimage_map_segment(struct kimage *image, + unsigned long addr, unsigned long size) +{ return NULL; } +static inline void kimage_unmap_segment(void *buffer) { } #define kexec_in_progress false #endif /* CONFIG_KEXEC_CORE */ diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 3d578c6fefee..26978ad02676 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -594,11 +594,6 @@ void kimage_terminate(struct kimage *image) *image->entry = IND_DONE; } -#define for_each_kimage_entry(image, ptr, entry) \ - for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ - ptr = (entry & IND_INDIRECTION) ? \ - boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1) - static void kimage_free_entry(kimage_entry_t entry) { struct page *page; @@ -921,6 +916,60 @@ int kimage_load_segment(struct kimage *image, return result; } +void *kimage_map_segment(struct kimage *image, + unsigned long addr, unsigned long size) +{ + unsigned long eaddr = addr + size; + unsigned long src_page_addr, dest_page_addr; + unsigned int npages; + struct page **src_pages; + int i; + kimage_entry_t *ptr, entry; + void *vaddr = NULL; + + /* + * Collect the source pages and map them in a contiguous VA range. 
+ */ + npages = PFN_UP(eaddr) - PFN_DOWN(addr); + src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL); + if (!src_pages) { + pr_err("%s: Could not allocate ima pages array.\n", __func__); + return NULL; + } + + i = 0; + for_each_kimage_entry(image, ptr, entry) { + if (entry & IND_DESTINATION) + dest_page_addr = entry & PAGE_MASK; + else if (entry & IND_SOURCE) { + if (dest_page_addr >= addr && dest_page_addr < eaddr) { + src_page_addr = entry & PAGE_MASK; + src_pages[i++] = + virt_to_page(__va(src_page_addr)); + if (i == npages) + break; + dest_page_addr += PAGE_SIZE; + } + } + } + + /* Sanity check. */ + WARN_ON(i < npages); + + vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL); + kfree(src_pages); + + if (!vaddr) + pr_err("%s: Could not map IMA buffer.\n", __func__); + + return vaddr; +} + +void kimage_unmap_segment(void *segment_buffer) +{ + vunmap(segment_buffer); +} + struct kexec_load_limit { /* Mutex protects the limit count. */ struct mutex mutex; diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 0063d5e7b634..55bd5362262e 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c @@ -12,12 +12,15 @@ #include <linux/kexec.h> #include <linux/of.h> #include <linux/ima.h> +#include <linux/reboot.h> +#include <asm/page.h> #include "ima.h" #ifdef CONFIG_IMA_KEXEC struct seq_file ima_kexec_file; static void *ima_kexec_buffer; static size_t kexec_segment_size; +static bool ima_kexec_update_registered; void ima_free_kexec_file_buf(struct seq_file *sf) { @@ -201,6 +204,7 @@ static int ima_update_kexec_buffer(struct notifier_block *self, } memcpy(ima_kexec_buffer, buf, buf_size); out: + kimage_unmap_segment(ima_kexec_buffer); ima_kexec_buffer = NULL; if (resume) @@ -213,6 +217,34 @@ struct notifier_block update_buffer_nb = { .notifier_call = ima_update_kexec_buffer, }; +/* + * Create a mapping for the source pages that contain the IMA buffer + * so we can update it later. 
+ */ +void ima_kexec_post_load(struct kimage *image) +{ + if (ima_kexec_buffer) { + kimage_unmap_segment(ima_kexec_buffer); + ima_kexec_buffer = NULL; + } + + if (!image->ima_buffer_addr) + return; + + ima_kexec_buffer = kimage_map_segment(image, + image->ima_buffer_addr, + image->ima_buffer_size); + if (!ima_kexec_buffer) { + pr_err("%s: Could not map measurements buffer.\n", __func__); + return; + } + + if (!ima_kexec_update_registered) { + register_reboot_notifier(&update_buffer_nb); + ima_kexec_update_registered = true; + } +} + #endif /* IMA_KEXEC */ /* -- 2.25.1
next prev parent reply other threads:[~2023-12-16 1:07 UTC|newest] Thread overview: 60+ messages / expand[flat|nested] mbox.gz Atom feed top 2023-12-16 1:07 [PATCH v3 0/7] ima: kexec: measure events between kexec load and execute Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi 2023-12-16 1:07 ` [PATCH v3 1/7] ima: define and call ima_alloc_kexec_file_buf Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi 2023-12-20 16:13 ` Mimi Zohar 2023-12-20 16:13 ` Mimi Zohar 2024-01-05 19:47 ` Tushar Sugandhi 2024-01-05 19:47 ` Tushar Sugandhi 2023-12-16 1:07 ` [PATCH v3 2/7] ima: kexec: move ima log copy from kexec load to execute Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi 2023-12-20 19:02 ` Mimi Zohar 2023-12-20 19:02 ` Mimi Zohar 2024-01-11 23:29 ` Tushar Sugandhi 2024-01-11 23:29 ` Tushar Sugandhi 2024-01-12 17:06 ` Mimi Zohar 2024-01-12 17:06 ` Mimi Zohar 2024-01-12 17:26 ` Tushar Sugandhi 2024-01-12 17:26 ` Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi [this message] 2023-12-16 1:07 ` [PATCH v3 3/7] ima: kexec: map IMA buffer source pages to image after kexec load Tushar Sugandhi 2023-12-16 1:07 ` [PATCH v3 4/7] kexec: update kexec_file_load syscall to alloc ima buffer after load Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi 2023-12-16 1:07 ` [PATCH v3 5/7] ima: suspend measurements during buffer copy at kexec execute Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi 2023-12-20 20:44 ` Mimi Zohar 2023-12-20 20:44 ` Mimi Zohar 2024-01-05 19:50 ` Tushar Sugandhi 2024-01-05 19:50 ` Tushar Sugandhi 2024-01-11 17:30 ` Mimi Zohar 2024-01-11 17:30 ` Mimi Zohar 2024-01-11 18:17 ` Tushar Sugandhi 2024-01-11 18:17 ` Tushar Sugandhi 2023-12-16 1:07 ` [PATCH v3 6/7] ima: configure memory to log events between kexec load and execute Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi 2023-12-20 20:15 ` Mimi Zohar 2023-12-20 20:15 ` Mimi Zohar 2024-01-05 20:20 ` Tushar Sugandhi 2024-01-05 20:20 ` Tushar Sugandhi 2024-01-07 17:00 ` Mimi Zohar 2024-01-07 17:00 ` Mimi Zohar 
2024-01-11 18:13 ` Tushar Sugandhi 2024-01-11 18:13 ` Tushar Sugandhi 2024-01-11 19:20 ` Stefan Berger 2024-01-11 19:20 ` Stefan Berger 2024-01-11 20:52 ` Tushar Sugandhi 2024-01-11 20:52 ` Tushar Sugandhi 2024-01-12 17:44 ` Mimi Zohar 2024-01-12 17:44 ` Mimi Zohar 2024-01-12 18:23 ` Tushar Sugandhi 2024-01-12 18:23 ` Tushar Sugandhi 2023-12-16 1:07 ` [PATCH v3 7/7] ima: measure kexec load and exec events as critical data Tushar Sugandhi 2023-12-16 1:07 ` Tushar Sugandhi 2023-12-20 20:41 ` Mimi Zohar 2023-12-20 20:41 ` Mimi Zohar 2024-01-05 20:22 ` Tushar Sugandhi 2024-01-05 20:22 ` Tushar Sugandhi 2024-01-07 14:24 ` Mimi Zohar 2024-01-07 14:24 ` Mimi Zohar 2024-01-11 17:56 ` Tushar Sugandhi 2024-01-11 17:56 ` Tushar Sugandhi
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20231216010729.2904751-4-tusharsu@linux.microsoft.com \ --to=tusharsu@linux.microsoft.com \ --cc=bauermann@kolabnow.com \ --cc=code@tyhicks.com \ --cc=ebiederm@xmission.com \ --cc=eric.snowberg@oracle.com \ --cc=kexec@lists.infradead.org \ --cc=linux-integrity@vger.kernel.org \ --cc=noodles@fb.com \ --cc=nramas@linux.microsoft.com \ --cc=paul@paul-moore.com \ --cc=roberto.sassu@huawei.com \ --cc=roberto.sassu@huaweicloud.com \ --cc=stefanb@linux.ibm.com \ --cc=zohar@linux.ibm.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.