TODO: test module

Zhengyi Chen 2024-01-20 23:30:38 +00:00
parent f9282a627f
commit 71b52acdb1
13 changed files with 453 additions and 192 deletions

src/.gitignore vendored Normal file

@@ -0,0 +1 @@
linux-debian-src/


@@ -0,0 +1,9 @@
obj-m += my_shmem.o

KDIR := /lib/modules/$(shell uname -r)/build
PWD := $(shell pwd)

all:
	$(MAKE) -C $(KDIR) M=$(PWD) modules
clean:
	$(MAKE) -C $(KDIR) M=$(PWD) clean


@@ -1,185 +0,0 @@
#include "asm-generic/errno-base.h"
#include "asm/current.h"
#include "asm/page-def.h"
#include "linux/export.h"
#include "linux/fs.h"
#include "linux/gfp_types.h"
#include "linux/rcupdate.h"
#include <asm-generic/cacheflush.h>
#include <linux/irqflags.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
/* PID: The task for which page cache is to be flushed. */
static int param_pid;
module_param_named(pid, param_pid, int, 0644);
/* addr: The virtual memory address used to index into the page to be flushed. */
static unsigned long param_vmaddr;
module_param_named(addr, param_vmaddr, ulong, 0644);
static void **page_addrs;
static void *page_addr = NULL;
/* Design:
* 2 userspace threads, each pinned to a different CPU, so that a strictly
* non-aliasing cache hierarchy cannot hide the incoherence we want to observe.
* 1. Load the kernel module; it inits a chardev which provides the page mapping.
* 2. Run the userspace program, wherein each thread mmaps the chardev to get
* the same underlying mapping.
* Alternatively -- one thread mmaps the chardev, while the kernel itself
* also modifies the page.
* 3. On write: the kernel module compares the content seen through the
* in-kernel mapping vs. the userspace mapping; they share the same PA but
* obviously have different VAs, so under VIPT indexing they may disagree.
* The kernel then flushes the dcache so userspace can read the update, and
* finally reads again to check whether the two mappings agree.
* A minimal userspace sketch of step 2 follows this comment.
*/
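A minimal userspace sketch of step 2 (not part of this commit): two threads pin themselves to different CPUs, mmap the same chardev, and touch the shared page through their own VAs. The device path /dev/my_shmem, the 4096-byte page size, the written value, and the sleep()-based ordering are illustrative assumptions only.

/* build (hypothetical): cc -O2 -pthread test_shmem.c -o test_shmem */
#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Pin the calling thread to `cpu`, then map one page of the chardev. */
static volatile uint32_t *map_on_cpu(int cpu, int fd)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	sched_setaffinity(0, sizeof(set), &set);

	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

static void *writer(void *arg)
{
	volatile uint32_t *p = map_on_cpu(0, (int)(intptr_t)arg);

	p[0] = 0xAA;				/* dirty the line via VA #1 */
	return NULL;
}

static void *reader(void *arg)
{
	volatile uint32_t *p = map_on_cpu(1, (int)(intptr_t)arg);

	sleep(1);				/* crude ordering vs. the writer */
	printf("reader sees 0x%x\n", p[0]);	/* observe via VA #2 */
	return NULL;
}

int main(void)
{
	int fd = open("/dev/my_shmem", O_RDWR);	/* assumed device node */
	pthread_t a, b;

	pthread_create(&a, NULL, writer, (void *)(intptr_t)fd);
	pthread_create(&b, NULL, reader, (void *)(intptr_t)fd);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}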
static int __init flush_dcache_init(void)
{
struct task_struct *tsk;
struct mm_struct *mm_of_tsk;
struct page *page_of_addr;
// Get task_struct from PID, then get its mm_struct
rcu_read_lock();
tsk = find_task_by_pid_ns(param_pid, &init_pid_ns);
if (!tsk) {
rcu_read_unlock();
goto ret_err_no_tsk;
}
mm_of_tsk = get_task_mm(tsk);
rcu_read_unlock();
// No mm_struct -- may be kthread?
if (!mm_of_tsk)
goto ret_warn_kthread;
/* Get page from mm_struct --
* We need to pin the page i.e., have it marked as FOLL_PIN.
* Ref. Documentation/core-api/pin_user_pages.rst:
* Because we are writing to the data represented by the page -- we are
* flushing the cache against a dirty page, after all -- we need FOLL_PIN
* instead of the get_user_pages*() API. This (effectively?) prevents the
* page from being evicted in the short term.
*/
mmap_read_lock(mm_of_tsk);
long pin_pages_retval = pin_user_pages_remote(
mm_of_tsk, param_vmaddr, 1,
FOLL_WRITE, &page_of_addr, NULL
); // We know mmap is locked, stop asking.
if (pin_pages_retval != 1) {
mmap_read_unlock(mm_of_tsk);
goto ret_err_no_page;
}
/* Begin test --
* It may complicate things if this got preempted, so no preemption.
*/
unsigned long _eflags;
local_irq_save(_eflags);
// [TODO] Alter the main memory content from the kernel side
// (a hypothetical sketch of this step follows this function).
pr_info("Before flush: cache line/memory diff:\n");
#ifdef CONFIG_ARM64
// dcache_clean_poc()
#endif
// dcache_clean_
pr_info("After flush: ...\n");
mmap_read_unlock(mm_of_tsk);
local_irq_restore(_eflags);
unpin_user_pages(&page_of_addr, 1);
return 0;
ret_warn_kthread:
pr_warn("[%s] Cannot find `tsk->mm` for PID %d. This may be a kthread.\n"
"Messing with `active_mm` may be unsafe. Exiting...",
__func__, param_pid);
return 0;
ret_err_no_tsk:
pr_err("[%s] Cannot find `task_struct` for PID %d.",
__func__, param_pid);
return -EINVAL;
ret_err_no_page:
pr_err("[%s] Cannot pin requested pages. [TODO]", __func__);
return pin_pages_retval == 0 ? -EINVAL : (int)pin_pages_retval;
}
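The [TODO] block above leaves the kernel-side write and flush unimplemented. Below is a hedged sketch of what that step could look like, assuming arm64 and working through the kernel alias of the pinned page; the helper name and the poke pattern are made up, and <linux/highmem.h> plus <asm/cacheflush.h> would be needed for kmap_local_page() and dcache_clean_poc().

static void _flush_dcache_poke_page(struct page *pg)
{
	/* Kernel-VA alias of the same physical page userspace has mapped. */
	u32 *kva = kmap_local_page(pg);

	pr_info("Before flush: kernel alias reads 0x%x\n", kva[0]);
	kva[0] ^= 0xdeadbeef;	/* dirty the cache line through the kernel VA */
#ifdef CONFIG_ARM64
	/* Clean this alias to the Point of Coherency so a later read through
	 * the (differently indexed) user VA can observe the update. */
	dcache_clean_poc((unsigned long)kva, (unsigned long)kva + PAGE_SIZE);
#else
	flush_dcache_page(pg);	/* generic fallback */
#endif
	pr_info("After flush: kernel alias reads 0x%x\n", kva[0]);
	kunmap_local(kva);
}

flush_dcache_init() would call this with page_of_addr while the pin and the mmap read lock are still held.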
static void __exit flush_dcache_exit(void)
{
free_page((ulong) page_addr);
pr_info("[%s] See ya~", __func__);
}
int _flush_dcache_init_devfs(void)
{
// [TODO] Not implemented yet -- see the sketch after flush_dcache_fops below.
return 0;
}
int _flush_dcache_alloc_pages(int page_nr)
{
page_addr = (void *)__get_free_page(GFP_USER); // kernel VA; pairs with free_page() in _exit
if (!page_addr)
goto ret_err_alloc_failed;
return 0;
ret_err_alloc_failed:
pr_err("[%s] Failed to allocate virtual memory page.", __func__);
return -ENOMEM;
}
static int flush_dcache_fops_open(struct inode *inode, struct file *filp)
{
filp->f_mode |= FMODE_CAN_ODIRECT;
return generic_file_open(inode, filp);
}
static int flush_dcache_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
/* Check if vma has mm backing -- e.g., non-kthread */
if (!vma->vm_mm)
goto ret_err_no_mm;
/* Check vma size */
ulong vma_size =
PAGE_ALIGN(max(PAGE_SIZE, vma->vm_end - vma->vm_start));
if (vma_size != PAGE_SIZE)
goto ret_err_wrong_size;
/* Insert page */
struct page *page = virt_to_page(page_addr); // page_addr is a linear-map VA, not vmalloc
vm_insert_page(vma, vma->vm_start, page);
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND);
pr_info("[%s] mmapped vma: 0x%lx-0x%lx",
__func__, vma->vm_start, vma->vm_end);
return 0;
ret_err_no_mm:
pr_err("[%s] vm_area_struct has null vm_mm -- kthread or crashed?",
__func__);
return -EINVAL;
ret_err_wrong_size:
pr_err("[%s] Requested %ld pages -- please request only 1!",
__func__, vma_size);
return -EINVAL;
}
const struct file_operations flush_dcache_fops = {
.owner = THIS_MODULE,
.open = flush_dcache_fops_open,
.mmap = flush_dcache_fops_mmap
};
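For reference, a hedged sketch of what _flush_dcache_init_devfs() could eventually do, modeled on the chardev/class registration the my_shmem module performs further down; the device name "flush_dcache", the zero minor, and the _sketch suffix are assumptions, and <linux/device.h> plus <linux/version.h> would additionally be needed.

static struct class *_flush_dcache_class;
static int _flush_dcache_major;

int _flush_dcache_init_devfs_sketch(void)
{
	_flush_dcache_major = register_chrdev(0, "flush_dcache",
					      &flush_dcache_fops);
	if (_flush_dcache_major < 0)
		return _flush_dcache_major;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
	_flush_dcache_class = class_create("flush_dcache");
#else
	_flush_dcache_class = class_create(THIS_MODULE, "flush_dcache");
#endif
	if (IS_ERR(_flush_dcache_class)) {
		unregister_chrdev(_flush_dcache_major, "flush_dcache");
		return PTR_ERR(_flush_dcache_class);
	}

	/* Userspace then sees /dev/flush_dcache once udev picks this up. */
	device_create(_flush_dcache_class, NULL,
		      MKDEV(_flush_dcache_major, 0), NULL, "flush_dcache");
	return 0;
}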
// const struct vm_operations_struct flush_dcache_vmops = {
// .mremap = NULL,
// };
module_init(flush_dcache_init);
module_exit(flush_dcache_exit);
MODULE_LICENSE("GPL");


@@ -1,10 +1,11 @@
#include "asm-generic/cacheflush.h"
#include "asm/cacheflush.h"
// [TODO] Clean up headers
#include <asm/cacheflush.h>
#include "asm/page-def.h"
#include "linux/atomic/atomic-long.h"
#include "linux/device.h"
#include "linux/device/class.h"
#include "linux/mutex.h"
#include "linux/pfn_t.h"
#include "linux/pid.h"
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
@@ -17,9 +18,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
MODULE_AUTHOR("Zk.");
MODULE_DESCRIPTION("4.2.1: mmap for point of coherency");
MODULE_DESCRIPTION("4.2.W1: mmap for point of coherency");
MODULE_LICENSE("GPL");
struct my_shmem_page {
@@ -45,11 +47,25 @@ const char* DEV_NAME = "my_shmem";
static void my_shmem_vmops_close(struct vm_area_struct *vma)
{
// [TODO] Flush dcache at close.
// `dcache_clean_poc` writebacks D-cache region s.t. PoC. Period.
// `dcache_clean_poc` writes back the D-cache region up to the PoC. Period.
// This should? work on all ARM64 CPUs w/ no-alias VIPT dcache.
// The addresses are VAs (obv., as opposed to PAs).
// dcache_clean_poc(unsigned long start, unsigned long end)
dcache_clean_poc(vma->vm_start, vma->vm_end);
pr_info("[%s] Closing vma: [0x%lx - 0x%lx].\n",
__func__, vma->vm_start, vma->vm_end);
// Or dcache_clean_poc(vma->vm_start, vma->vm_end)...
// which I'm not sure is correct; we'll see.
// vm_start/vm_end are page-aligned and vm_end is exclusive, so step one
// page at a time over [vm_start, vm_end).
// (An alternative that walks the module's own pages follows this function.)
for (ulong addr = vma->vm_start;
addr < vma->vm_end;
addr += PAGE_SIZE)
{
// For each page intersected by this vma:
struct page *pg = vmalloc_to_page((void *) addr);
BUG_ON(pg == NULL); // [>_<] Hope this works...
pr_info("[%s] Flushing page 0x%llx.\n",
__func__, page_to_pfn_t(pg).val);
flush_dcache_page(pg);
}
pr_info("[%s] Flushed dcache.\n", __func__);
}
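Since vmalloc_to_page() is only defined for vmalloc addresses, translating the vma's user VAs as done above is shaky. A hedged alternative is to flush the pages the module itself handed out, whatever bookkeeping it keeps for them; the helper below is a sketch with an assumed array-of-struct-page-pointers interface, not this module's actual data structure.

/* pages[]: the struct page pointers this module allocated for the mapping. */
static void my_shmem_flush_tracked_pages(struct page **pages, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (!pages[i])
			continue;
		/* Arch-appropriate D-cache maintenance via the kernel alias. */
		flush_dcache_page(pages[i]);
	}
}

my_shmem_vmops_close() could then call this instead of walking user VAs, sidestepping the BUG_ON above.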
static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
@@ -73,7 +89,7 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
nr_pages_from_vm_start--;
}
// Found correct page entry, remap
get_page(page_entry->page);
get_page(page_entry->page); // [FIXME] Incorrect refcount keeping.
vmf->page = page_entry->page;
_dbg_phys = page_to_phys(page_entry->page);
@@ -146,6 +162,7 @@ ok_ret_allocated:
static const struct vm_operations_struct my_shmem_vmops = {
.close = my_shmem_vmops_close,
.fault = my_shmem_vmops_fault,
};
@@ -204,7 +221,12 @@ static int __init my_shmem_init(void)
goto err_ret_cdev_reg_failed;
/* Create device class */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
class = class_create(DEV_NAME);
#else
// Re: commit dcfbb67e48a2becfce7990386e985b9c45098ee5 (I think?)
class = class_create(THIS_MODULE, DEV_NAME);
#endif
if (IS_ERR(class)) {
unregister_chrdev(major, DEV_NAME);
goto err_ret_class_crea_failed;