Module working? Writer done

Zhengyi Chen 2024-01-27 05:38:51 +00:00
parent ede1b4ff6a
commit 200739c892
2 changed files with 59 additions and 50 deletions

Makefile

@@ -6,8 +6,10 @@ CC += ${MY_CFLAGS}
 KDIR := /lib/modules/$(shell uname -r)/build
 KDIR_CROSS := ${HOME}/Git/linux
 KDIR_UOE := /disk/scratch/s2018374/linux
+KDIR_SSHFS := /tmp/inf-sshfs/linux
 PWD := $(shell pwd)
+GITDIR := $(shell git rev-parse --show-toplevel)
 all:
 	$(MAKE) -C $(KDIR) M=$(PWD) modules
@@ -21,6 +23,10 @@ all-uoe:
 	$(MAKE) -C $(KDIR_UOE) M=$(PWD) modules EXTRA_CFLAGS="$(MY_CFLAGS)"
+all-sshfs:
+	$(MAKE) -C $(KDIR_SSHFS) M=$(PWD) modules EXTRA_CFLAGS="$(MY_CFLAGS)"
 clean:
 	$(MAKE) -C $(KDIR) M=$(PWD) clean
@@ -29,3 +35,11 @@ clean-cross:
 clean-uoe:
 	$(MAKE) -C $(KDIR_UOE) M=$(PWD) clean
+clean-sshfs:
+	$(MAKE) -C $(KDIR_SSHFS) M=$(PWD) clean
+# Extra
+install:
+	cp my_shmem.ko $(GITDIR)/test/shared

my_shmem.c

@@ -1,5 +1,7 @@
 // [TODO] Clean up headers...
 #include <linux/device.h>
 #include "asm-generic/memory_model.h"
 #include "asm/page-def.h"
 #include "linux/gfp.h"
 #include "linux/mutex.h"
 #include "linux/pfn_t.h"
@@ -45,22 +47,20 @@ const char* DEV_NAME = "my_shmem";
  */
 static void my_shmem_vmops_close(struct vm_area_struct *vma)
 {
 	// [TODO] Flush only pages we need to flush, via `walk_page_vma`, etc.
 	// However this works -- and it's proof-of-concept anyways
 	struct my_shmem_page *curr;
 	mutex_lock(&my_shmem_pages_mtx);
 	list_for_each_entry(curr, &my_shmem_pages, list) {
 		struct page *pg = curr->page;
 		// Ref: `arch_dma_prep_coherent`
-		ulong vaddr_start = (ulong) page_address(pg);
+		ulong vaddr_bgn = (ulong) page_address(pg);
+		ulong vaddr_end = vaddr_bgn + PAGE_SIZE;
+		BUG_ON(!virt_addr_valid((void *) vaddr_bgn));
 		pr_info("[%s] Before flush: 0x%px has 0x%lx [+%ld].\n",
-			__func__, page_address(pg), *(ulong *)(page_address(pg)),
+			__func__, (void *) vaddr_bgn, *(ulong *) vaddr_bgn,
 			sizeof(ulong));
 		// [!!] `dcache_clean_poc` seems not exported.
-		__dcache_clean_poc(vaddr_start, vaddr_start + PAGE_SIZE);
+		__dcache_clean_poc(vaddr_bgn, vaddr_end);
 		pr_info("[%s] After flush: 0x%px has 0x%lx [+%ld].\n",
-			__func__, page_address(pg), *(ulong *)(page_address(pg)),
+			__func__, (void *) vaddr_bgn, *(ulong *) vaddr_bgn,
 			sizeof(ulong));
 	}
 	mutex_unlock(&my_shmem_pages_mtx);
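
The [TODO] above points at `walk_page_vma`. A minimal sketch of that approach, not part of this commit (the callback name, ops table, and locking comment are assumptions):

#include <linux/pagewalk.h>

/* Flush only the PTE-mapped pages of this VMA, not the whole global list. */
static int my_shmem_flush_pte(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t entry = ptep_get(pte);

	if (pte_present(entry)) {
		ulong kva = (ulong) page_address(pte_page(entry));

		__dcache_clean_poc(kva, kva + PAGE_SIZE);
	}
	return 0;
}

static const struct mm_walk_ops my_shmem_flush_ops = {
	.pte_entry = my_shmem_flush_pte,
};

/* Caller must hold mmap_lock for vma->vm_mm; walk_page_vma() asserts it. */
static void my_shmem_flush_vma(struct vm_area_struct *vma)
{
	walk_page_vma(vma, &my_shmem_flush_ops, NULL);
}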
@@ -69,31 +69,39 @@ static void my_shmem_vmops_close(struct vm_area_struct *vma)
 static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
 {
 	pr_info("[%s] vm_fault @ 0x%lx (real address 0x%lx).\n",
 		__func__, vmf->address, vmf->real_address);
+	struct vm_area_struct *vma_of_vmf = vmf->vma;
 	ulong nr_pages_from_vm_start =
-		(vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
-	const pgoff_t _dbg_offset_from_page = vmf->pgoff;
-	phys_addr_t _dbg_phys;
+		(vmf->address - vma_of_vmf->vm_start) >> PAGE_SHIFT;
+	const pgoff_t _dbg_offset_from_vma = vmf->pgoff;
+	phys_addr_t _dbg_phys_of_page;
+	BUG_ON(!vma_of_vmf->vm_mm);
 	mutex_lock(&my_shmem_pages_mtx);
 	if (nr_pages_from_vm_start < my_shmem_page_count) {
 		/* Offset in range, return existing page */
 		pr_info("[%s] Found remappable page nr: %lu, offset: %lu. "
-			"Total pages allocated: %ld.\n",
-			__func__, nr_pages_from_vm_start, _dbg_offset_from_page,
-			my_shmem_page_count);
+			"Total pages allocated: %ld (list size: %ld).\n",
+			__func__, nr_pages_from_vm_start, _dbg_offset_from_vma,
+			my_shmem_page_count, list_count_nodes(&my_shmem_pages));
 		// We won't delete elements from list here!
 		// Find correct entry
 		struct my_shmem_page *page_entry;
 		list_for_each_entry(page_entry, &my_shmem_pages, list) {
-			if (!nr_pages_from_vm_start)
-				break;
+			if (!nr_pages_from_vm_start) break;
 			nr_pages_from_vm_start--;
 		}
-		// Found correct page entry, remap
+		// No get_page here, get_page at alloc time only keeps memory
+		// valid for module code.
+		// Found correct entry, remap to userspace
+		pr_info("[%s] Remapped pfn: %ld, kernel vaddr: %px.\n",
+			__func__, page_to_pfn(page_entry->page),
+			page_to_virt(page_entry->page));
+		// get_page(page_entry->page);
 		vmf->page = page_entry->page;
-		_dbg_phys = page_to_phys(page_entry->page);
+		_dbg_phys_of_page = page_to_phys(page_entry->page);
 		mutex_unlock(&my_shmem_pages_mtx);
 		goto ok_ret_remapped;
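
The commented-out get_page above is a deliberate choice in this commit; for comparison, the conventional .fault pattern takes one reference per mapping, because every unmap of the VMA does a matching put_page() on the installed page. A hypothetical helper in that style (not the module's code):

/* Hypothetical: conventional refcounting when returning a page from .fault. */
static vm_fault_t my_shmem_hand_out_page(struct vm_fault *vmf, struct page *pg)
{
	get_page(pg);	/* one reference per mapping; unmap will put it */
	vmf->page = pg;
	return 0;
}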
@@ -104,6 +112,7 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
 		nr_pages_from_vm_start - my_shmem_page_count + 1;
 	pr_info("[%s] Not enough remappable pages, allocating %ld more...\n",
 		__func__, nr_pages_to_alloc);
+	struct page *last_pg;
 	for (; nr_pages_to_alloc > 0; nr_pages_to_alloc--)
 	{
 		// Allocate page handle in kernel
@@ -114,26 +123,29 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
 			goto err_ret_no_kmem;
 		}
-		// Allocate page in virtual memory,
-		// for convenience also directly accessible by kernel.
-		struct page *pg = alloc_page(GFP_USER);
-		if (!pg) {
+		// Allocate kernel virtual page
+		struct page *curr_pg = alloc_page(GFP_USER);
+		if (!curr_pg) {
 			mutex_unlock(&my_shmem_pages_mtx);
 			goto err_ret_no_vmem;
 		}
-		get_page(pg);
-		new_page->page = pg;
+		pr_info("[%s] Allocated pfn: %ld, kernel vaddr: %px.\n",
+			__func__, page_to_pfn(curr_pg), page_to_virt(curr_pg));
+		get_page(curr_pg);
+		new_page->page = curr_pg;
 		// List maintenance: add and incr. size
 		list_add(&new_page->list, &my_shmem_pages);
 		my_shmem_page_count++;
-		// Fill in allocated page entry
-		// Bad for performance, but guarantees to be the last page entry
-		// on out.
-		vmf->page = new_page->page;
-		_dbg_phys = page_to_phys(new_page->page);
+		// Fill in last_pg for final return from page fault handler
+		last_pg = curr_pg;
 	}
+	// Fill in vmf's page for return
+	// get_page(last_pg);
+	vmf->page = last_pg;
+	_dbg_phys_of_page = page_to_phys(last_pg);
 	mutex_unlock(&my_shmem_pages_mtx);
 	goto ok_ret_allocated;
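
The deleted comment above calls the linear list scan "bad for performance"; an XArray keyed by page offset would make both lookup and allocation direct. A sketch under that assumption (names are hypothetical; this is an alternative, not what the module does):

#include <linux/xarray.h>

static DEFINE_XARRAY(my_shmem_xa);	/* pgoff_t -> struct page * */

static struct page *my_shmem_lookup_or_alloc(pgoff_t pgoff)
{
	struct page *pg = xa_load(&my_shmem_xa, pgoff);

	if (pg)
		return pg;
	pg = alloc_page(GFP_USER);
	if (!pg)
		return NULL;
	if (xa_err(xa_store(&my_shmem_xa, pgoff, pg, GFP_KERNEL))) {
		__free_page(pg);
		return NULL;
	}
	return pg;
}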
@@ -146,27 +158,10 @@ err_ret_no_vmem:
 		__func__);
 	return VM_FAULT_OOM;
 ok_ret_remapped:
-	if (vmf->vma->vm_mm) {
-		rcu_read_lock();
-		struct task_struct *fault_owner = vmf->vma->vm_mm->owner;
-		pr_info("[%s] Remapped phys: 0x%llx -> virt@PID(%d): 0x%lx.\n",
-			__func__, _dbg_phys, fault_owner->pid, vmf->address);
-		rcu_read_unlock();
-	}
 	return 0;
 ok_ret_allocated:
-	if (vmf->vma->vm_mm){
-		rcu_read_lock();
-		struct task_struct *fault_owner = vmf->vma->vm_mm->owner;
-		pr_info("[%s] Allocated phys: 0x%llx -> virt@PID(%d): 0x%lx.\n",
-			__func__, _dbg_phys, fault_owner->pid, vmf->address);
-		rcu_read_unlock();
-	}
 	return 0;
 }
 static const struct vm_operations_struct my_shmem_vmops = {
 	.close = my_shmem_vmops_close,
 	.fault = my_shmem_vmops_fault,
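
These vm_ops are presumably installed by the module's mmap file operation, which is outside this diff; the usual shape (hypothetical here, the module's real handler may differ) is:

static int my_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Install the fault/close callbacks; pages arrive lazily via .fault. */
	vma->vm_ops = &my_shmem_vmops;
	return 0;
}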
@@ -279,7 +274,7 @@ static void __exit my_shmem_exit(void)
 	mutex_lock(&my_shmem_pages_mtx);
 	list_for_each_entry_safe(page_entry, tmp, &my_shmem_pages, list) {
 		put_page(page_entry->page);
-		kfree(page_to_virt(page_entry->page));
+		free_page((ulong) page_to_virt(page_entry->page));
 		my_shmem_page_count--;
 		list_del(&page_entry->list);
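
Switching kfree() to free_page() is the substantive fix here: the memory came from alloc_page(), not kmalloc(). An equivalent cleanup sketch that stays in struct-page terms, which would also be safe for highmem pages where page_to_virt() is not usable (helper name is hypothetical):

/* Drop both references taken at allocation time, then free the handle. */
static void my_shmem_release_entry(struct my_shmem_page *entry)
{
	put_page(entry->page);		/* undo the get_page() at alloc */
	__free_page(entry->page);	/* undo alloc_page() itself */
	list_del(&entry->list);
	kfree(entry);			/* the list handle was kmalloc'ed */
}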