This commit is contained in:
Zhengyi Chen 2024-01-18 00:59:03 +00:00
parent 0e4e4f3a7b
commit eae77aa719
2 changed files with 169 additions and 6 deletions

View file

@@ -6,7 +6,6 @@
#include "linux/gfp_types.h"
#include "linux/rcupdate.h"
-#include <asm-generic/cacheflush.h>
#include <asm/cacheflush.h>
#include <linux/irqflags.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
@@ -23,6 +22,7 @@ module_param_named(pid, param_pid, int, 0644);
static unsigned long param_vmaddr;
module_param_named(addr, param_vmaddr, ulong, 0644);
+static void **page_addrs;
static void *page_addr = NULL;
/* Design:
@@ -121,7 +121,7 @@ int _flush_dcache_init_devfs(void)
}
-int _flush_dcache_init_backend(void)
+int _flush_dcache_alloc_pages(int page_nr)
{
	page_addr = alloc_page(GFP_USER);
	if (!page_addr)
@@ -133,13 +133,13 @@ ret_err_alloc_failed:
	return -ENOMEM;
}
-static int flush_dcache_open(struct inode *inode, struct file *filp)
+static int flush_dcache_fops_open(struct inode *inode, struct file *filp)
{
	filp->f_mode |= FMODE_CAN_ODIRECT;
	return generic_file_open(inode, filp);
}
-static int flush_dcache_mmap(struct file *filp, struct vm_area_struct *vma)
+static int flush_dcache_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Check that the vma has mm backing -- i.e., the fault is not from a kthread */
	if (!vma->vm_mm)
@@ -171,8 +171,8 @@ ret_err_wrong_size:
const struct file_operations flush_dcache_fops = {
	.owner = THIS_MODULE,
-	.open = flush_dcache_open,
-	.mmap = flush_dcache_mmap
+	.open = flush_dcache_fops_open,
+	.mmap = flush_dcache_fops_mmap
};
// const struct vm_operations_struct flush_dcache_vmops = {
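From userspace, the renamed hooks are reached through an ordinary open(2) + mmap(2) sequence. Below is a minimal sketch, assuming the module exposes a node such as /dev/flush_dcache (the devfs node name is not visible in this hunk, so treat it as hypothetical); the O_DIRECT flag is only honored because flush_dcache_fops_open() sets FMODE_CAN_ODIRECT:

/* Hypothetical userspace exercise of flush_dcache_fops.
 * /dev/flush_dcache is an assumed node name -- check the devfs init code.
 */
#define _GNU_SOURCE /* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/flush_dcache", O_RDWR | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* One shared, writable page -- lands in flush_dcache_fops_mmap() */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	munmap(p, 4096);
	close(fd);
	return 0;
}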

View file

@@ -0,0 +1,163 @@
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
struct my_shmem_page {
	struct page *page;
	struct list_head list;
};
static DEFINE_MUTEX(my_shmem_pages_mtx);
/* [!] READ/WRITE UNDER LOCK */
static LIST_HEAD(my_shmem_pages);
/* [!] READ/WRITE UNDER LOCK */
static size_t my_shmem_page_count = 0;
/* Virtual Memory Area Operations...
* ============================================================================
*/
static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
{
	pgoff_t page_offset = vmf->pgoff;
	phys_addr_t _phys;
	size_t old_shmem_page_count;

	mutex_lock(&my_shmem_pages_mtx);
	old_shmem_page_count = READ_ONCE(my_shmem_page_count);
	if (unlikely(page_offset > old_shmem_page_count)) {
		/* IMPOSSIBLE -- programming error or wrong assumption... */
		mutex_unlock(&my_shmem_pages_mtx);
		goto err_ret_impossible_count;
	}
	if (page_offset < old_shmem_page_count) {
		/* Offset in range: return the existing page */
		struct my_shmem_page *page_entry;

		pr_info("[%s] Found remappable page offset %lu.\n",
			__func__, page_offset);
		/* Walk to the page_offset-th entry; entries are kept in
		 * page-offset order and are never deleted from the list.
		 */
		list_for_each_entry(page_entry, &my_shmem_pages, list) {
			if (!page_offset)
				break;
			page_offset--;
		}
		/* Found the matching entry: take a reference and remap */
		get_page(page_entry->page);
		vmf->page = page_entry->page;
		_phys = page_to_phys(page_entry->page);
		mutex_unlock(&my_shmem_pages_mtx);
		goto ok_ret_remapped;
	}
	/* Otherwise, allocate the new page.  The guard above guarantees
	 * page_offset == old_shmem_page_count here, so this loops exactly once.
	 */
	for (int i = 0; i <= page_offset - old_shmem_page_count; i++) {
		/* Allocate the page handle in kernel memory */
		struct my_shmem_page *new_page = kzalloc(
			sizeof(struct my_shmem_page), GFP_KERNEL);
		if (!new_page) {
			mutex_unlock(&my_shmem_pages_mtx);
			goto err_ret_no_kmem;
		}
		/* Allocate the backing page in virtual memory */
		void *addr = vmalloc_user(PAGE_SIZE);
		if (!addr) {
			kfree(new_page);
			mutex_unlock(&my_shmem_pages_mtx);
			goto err_ret_no_vmem;
		}
		new_page->page = vmalloc_to_page(addr);
		/* List maintenance: append (keeps page-offset order) and
		 * increment the count.
		 */
		list_add_tail(&new_page->list, &my_shmem_pages);
		my_shmem_page_count++;
		/* Fill in the fault with the freshly allocated page */
		get_page(new_page->page);
		vmf->page = new_page->page;
		_phys = page_to_phys(new_page->page);
	}
	mutex_unlock(&my_shmem_pages_mtx);
	goto ok_ret_allocated;
err_ret_impossible_count:
	pr_crit("[%s] IMPOSSIBLE page offset %lu > page count %zu "
		"-- no way one fault services multiple pages!\n",
		__func__, page_offset, old_shmem_page_count);
	return VM_FAULT_ERROR;
err_ret_no_kmem:
	pr_err("[%s] Cannot allocate `struct my_shmem_page` in kernel memory.\n",
	       __func__);
	return VM_FAULT_OOM;
err_ret_no_vmem:
	pr_err("[%s] Cannot allocate the requested page for virtual memory.\n",
	       __func__);
	return VM_FAULT_OOM;
ok_ret_remapped:
	if (vmf->vma->vm_mm) {
		struct task_struct *fault_owner;

		rcu_read_lock();
		fault_owner = vmf->vma->vm_mm->owner;
		pr_info("[%s] Remapped phys: 0x%llx -> virt@PID(%d): 0x%lx.\n",
			__func__, (unsigned long long)_phys,
			fault_owner->pid, vmf->address);
		rcu_read_unlock();
	}
	return 0;
ok_ret_allocated:
	if (vmf->vma->vm_mm) {
		struct task_struct *fault_owner;

		rcu_read_lock();
		fault_owner = vmf->vma->vm_mm->owner;
		pr_info("[%s] Allocated phys: 0x%llx -> virt@PID(%d): 0x%lx.\n",
			__func__, (unsigned long long)_phys,
			fault_owner->pid, vmf->address);
		rcu_read_unlock();
	}
	return 0;
}
static const struct vm_operations_struct my_shmem_vmops = {
	.fault = my_shmem_vmops_fault,
};
/* File Operations...
* ============================================================================
*/
// static int my_shmem_fops_open(struct inode *inode, struct file *filp);
static int my_shmem_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &my_shmem_vmops;
	return 0;
}
static const struct file_operations my_shmem_fops = {
	.owner = THIS_MODULE,
	.mmap = my_shmem_fops_mmap,
	.fsync = noop_fsync,
};
/* Module init & exit...
* ============================================================================
*/
static int __init my_shmem_init(void)
{
	/* Major 0 requests a dynamic major: register_chrdev() then returns
	 * the allocated major (positive) on success, negative on error.
	 */
	int reg_cdev_ret = register_chrdev(0, "my_shmem", &my_shmem_fops);

	if (reg_cdev_ret < 0)
		goto err_ret_cdev_reg_failed;
	return 0;
err_ret_cdev_reg_failed:
	pr_err("[%s] Cannot register character dev -- error code %d.\n",
	       __func__, reg_cdev_ret);
	return reg_cdev_ret;
}
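For completeness, here is a hedged userspace exercise of the fault path. It assumes a device node was created by hand (e.g. mknod with the dynamic major reported in /proc/devices); neither the node name /dev/my_shmem nor the mknod step appears in this commit. Touching the mapping one page at a time, in offset order, matches the handler's assumption that each fault allocates at most one new page:

/* Hypothetical test for my_shmem_vmops_fault(): each first touch of a
 * page in a MAP_SHARED mapping faults in one new backing page.
 * /dev/my_shmem is an assumed node, e.g. `mknod /dev/my_shmem c <major> 0`.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/my_shmem", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	size_t len = 4 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Touch pages in order: each first touch allocates one page */
	for (size_t i = 0; i < len; i += 4096)
		p[i] = 'x';
	munmap(p, len);
	close(fd);
	return 0;
}

Because the pages live on a module-global list, every process that maps the device should observe the same backing pages; the get_page() reference taken per fault is what keeps a page alive across multiple mappings.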