Diffstat (limited to 'kernel')
 kernel/kexec_core.c | 54
 kernel/kexec_file.c | 33
 2 files changed, 86 insertions(+), 1 deletion(-)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 3e62b944c883..9c59fa480b0b 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -877,6 +877,60 @@ int kimage_load_segment(struct kimage *image,
 	return result;
 }
 
+void *kimage_map_segment(struct kimage *image,
+			 unsigned long addr, unsigned long size)
+{
+	unsigned long src_page_addr, dest_page_addr = 0;
+	unsigned long eaddr = addr + size;
+	kimage_entry_t *ptr, entry;
+	struct page **src_pages;
+	unsigned int npages;
+	void *vaddr = NULL;
+	int i;
+
+	/*
+	 * Collect the source pages and map them in a contiguous VA range.
+	 */
+	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
+	src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
+	if (!src_pages) {
+		pr_err("Could not allocate ima pages array.\n");
+		return NULL;
+	}
+
+	i = 0;
+	for_each_kimage_entry(image, ptr, entry) {
+		if (entry & IND_DESTINATION) {
+			dest_page_addr = entry & PAGE_MASK;
+		} else if (entry & IND_SOURCE) {
+			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
+				src_page_addr = entry & PAGE_MASK;
+				src_pages[i++] =
+					virt_to_page(__va(src_page_addr));
+				if (i == npages)
+					break;
+				dest_page_addr += PAGE_SIZE;
+			}
+		}
+	}
+
+	/* Sanity check. */
+	WARN_ON(i < npages);
+
+	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
+	kfree(src_pages);
+
+	if (!vaddr)
+		pr_err("Could not map ima buffer.\n");
+
+	return vaddr;
+}
+
+void kimage_unmap_segment(void *segment_buffer)
+{
+	vunmap(segment_buffer);
+}
+
 struct kexec_load_limit {
 	/* Mutex protects the limit count. */
 	struct mutex mutex;
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index fba686487e3b..0adb645072aa 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -38,6 +38,21 @@ void set_kexec_sig_enforced(void)
 }
 #endif
 
+#ifdef CONFIG_IMA_KEXEC
+static bool check_ima_segment_index(struct kimage *image, int i)
+{
+	if (image->is_ima_segment_index_set && i == image->ima_segment_index)
+		return true;
+	else
+		return false;
+}
+#else
+static bool check_ima_segment_index(struct kimage *image, int i)
+{
+	return false;
+}
+#endif
+
 static int kexec_calculate_store_digests(struct kimage *image);
 
 /* Maximum size in bytes for kernel/initrd files. */
@@ -186,6 +201,15 @@ kimage_validate_signature(struct kimage *image)
 }
 #endif
 
+static int kexec_post_load(struct kimage *image, unsigned long flags)
+{
+#ifdef CONFIG_IMA_KEXEC
+	if (!(flags & KEXEC_FILE_ON_CRASH))
+		ima_kexec_post_load(image);
+#endif
+	return machine_kexec_post_load(image);
+}
+
 /*
  * In file mode list of segments is prepared by kernel. Copy relevant
  * data from user space, do error checking, prepare segment list
@@ -413,7 +437,7 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
 
 	kimage_terminate(image);
 
-	ret = machine_kexec_post_load(image);
+	ret = kexec_post_load(image, flags);
 	if (ret)
 		goto out;
 
@@ -776,6 +800,13 @@ static int kexec_calculate_store_digests(struct kimage *image)
 		if (ksegment->kbuf == pi->purgatory_buf)
 			continue;
 
+		/*
+		 * Skip the segment if ima_segment_index is set and matches
+		 * the current index
+		 */
+		if (check_ima_segment_index(image, i))
+			continue;
+
 		ret = crypto_shash_update(desc, ksegment->kbuf,
 					  ksegment->bufsz);
 		if (ret)
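
Usage sketch (not part of the patch): the two new helpers are meant to be paired around an in-place update of an already-loaded kexec segment, such as the IMA measurement list refreshed at soft-boot time. The caller name, the addr/size parameters, and the plain memcpy() update below are illustrative assumptions, not code from this series.

/*
 * Illustrative sketch only -- not part of the diff above. It assumes the
 * caller already knows the segment's load address and size (for IMA these
 * come from the kimage bookkeeping set up at kexec load time).
 */
static int example_refresh_segment(struct kimage *image,
				   unsigned long addr, unsigned long size,
				   const void *data, size_t len)
{
	void *buf;

	if (len > size)
		return -EINVAL;

	/* Map the segment's scattered source pages into one contiguous VA range. */
	buf = kimage_map_segment(image, addr, size);
	if (!buf)
		return -ENOMEM;

	/* Update the segment contents in place. */
	memcpy(buf, data, len);

	/* Drop the temporary vmap()ed view. */
	kimage_unmap_segment(buf);
	return 0;
}

Because kimage_map_segment() builds the view with vmap(), the update works even when the segment's source pages are physically discontiguous, and kimage_unmap_segment() is simply vunmap() on the returned buffer.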