TODO: test module

This commit is contained in:
Zhengyi Chen 2024-01-20 23:30:38 +00:00
parent f9282a627f
commit 71b52acdb1
13 changed files with 453 additions and 192 deletions

276
shared/my_shmem.c Normal file
View file

@@ -0,0 +1,276 @@
// [TODO] Clean up headers
#include <asm/cacheflush.h>
#include "linux/atomic/atomic-long.h"
#include "linux/device.h"
#include "linux/device/class.h"
#include "linux/mutex.h"
#include "linux/pid.h"
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h> /* kzalloc()/kfree(); was only pulled in transitively before */
#include <linux/types.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
/* Module metadata. */
MODULE_AUTHOR("Zk.");
MODULE_DESCRIPTION("4.2.W1: mmap for point of coherency");
MODULE_LICENSE("GPL");
/* One entry per backing page handed out to userspace mappings.
 * `page` is obtained via vmalloc_user() + vmalloc_to_page() in the
 * fault handler; entries live on `my_shmem_pages` until module exit.
 */
struct my_shmem_page {
struct page *page;
struct list_head list;
};
/* Protects `my_shmem_pages` and `my_shmem_page_count`. */
static DEFINE_MUTEX(my_shmem_pages_mtx);
/* [!] READ/WRITE UNDER LOCK */
static LIST_HEAD(my_shmem_pages);
/* [!] READ/WRITE UNDER LOCK */
static size_t my_shmem_page_count = 0;
/* Char-dev registration state, filled in by my_shmem_init(). */
static int major;
static struct class* class;
static struct device* dev;
const char* DEV_NAME = "my_shmem";
/* Virtual Memory Area Operations...
* ============================================================================
*/
/* VMA close hook: write back the D-cache for the whole mapped range so
 * other (non-coherent) observers see the latest data at the PoC.
 *
 * NOTE(review): `dcache_clean_poc` is given the *user* virtual range
 * [vm_start, vm_end). Assumes a no-alias VIPT D-cache (arm64) where
 * cleaning by user VA while this mm is still current is sufficient —
 * TODO confirm this is valid at .close time (the mapping may already be
 * in the middle of teardown).
 */
static void my_shmem_vmops_close(struct vm_area_struct *vma)
{
// [TODO] Flush dcache at close.
// `dcache_clean_poc` writebacks D-cache region till PoC. Period.
// This should? work on all ARM64 CPUs w/ no-alias VIPT dcache.
// The addresses are VAs (obv., as opposed to PAs).
pr_info("[%s] Closing vma: [0x%lx - 0x%lx].\n",
__func__, vma->vm_start, vma->vm_end);
// It might, however, be better to just call a asm-generic function
// e.g., `flush_dcache_page`... Though I don't think arm64 supports this.
// Not sure why.
dcache_clean_poc(vma->vm_start, vma->vm_end);
// flush_cache_range(vma, vma->vm_start, vma->vm_end);
pr_info("[%s] Flushed dcache.\n", __func__);
}
/* Page-fault handler: back the faulting offset with a shared page.
 *
 * Pages are kept on `my_shmem_pages` in page-index order (index 0 at the
 * list head), so the page for offset N is the N-th list entry. If the
 * faulting offset is beyond what has been allocated so far, every missing
 * page up to and including it is allocated first.
 *
 * Returns 0 with vmf->page set on success, VM_FAULT_OOM on allocation
 * failure.
 */
static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
{
ulong nr_pages_from_vm_start =
(vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
const pgoff_t _dbg_offset_from_page = vmf->pgoff;
phys_addr_t _dbg_phys;
mutex_lock(&my_shmem_pages_mtx);
if (nr_pages_from_vm_start < my_shmem_page_count) {
/* Offset in range, return existing page */
pr_info("[%s] Found remappable page nr: %lu, offset: %lu.\n",
__func__, nr_pages_from_vm_start, _dbg_offset_from_page);
// We won't delete elements from list here!
struct my_shmem_page *page_entry;
/* List is in page-index order (see list_add_tail below), so
 * walking nr_pages_from_vm_start entries lands on the right page.
 */
list_for_each_entry(page_entry, &my_shmem_pages, list) {
if (!nr_pages_from_vm_start)
break;
nr_pages_from_vm_start--;
}
// Found correct page entry, remap
get_page(page_entry->page); // [FIXME] Incorrect refcount keeping.
vmf->page = page_entry->page;
_dbg_phys = page_to_phys(page_entry->page);
mutex_unlock(&my_shmem_pages_mtx);
goto ok_ret_remapped;
}
/* Otherwise, allocate all missing pages up to the faulting index.
 * The last iteration allocates the requested page itself.
 */
for (ulong i = 0; i <= nr_pages_from_vm_start - my_shmem_page_count; i++)
{
// Allocate page handle in kernel
struct my_shmem_page *new_page = kzalloc(
sizeof(struct my_shmem_page), GFP_KERNEL);
if (!new_page) {
mutex_unlock(&my_shmem_pages_mtx);
goto err_ret_no_kmem;
}
// Allocate page in virtual memory
// [!] We specifically WANT to allocate cachable memory to
// create cache incoherence btwn procs. Synchronization
// (e.g., on arm64) is done via calling fsync for now.
void *addr = vmalloc_user(PAGE_SIZE);
if (!addr) {
kfree(new_page); /* don't leak the handle on failure */
mutex_unlock(&my_shmem_pages_mtx);
goto err_ret_no_vmem;
}
new_page->page = vmalloc_to_page(addr);
/* Append (not prepend!) so list position == page index; a
 * prepending list_add would make a given offset resolve to a
 * different page once more pages are allocated.
 */
list_add_tail(&new_page->list, &my_shmem_pages);
my_shmem_page_count++;
// Fill in allocated page entry; the final iteration's page wins.
get_page(new_page->page);
vmf->page = new_page->page;
_dbg_phys = page_to_phys(new_page->page);
}
mutex_unlock(&my_shmem_pages_mtx);
goto ok_ret_allocated;
err_ret_no_kmem:
pr_err("[%s] Cannot allocate `struct my_shmem_page` in kernel memory.\n",
__func__);
return VM_FAULT_OOM;
err_ret_no_vmem:
pr_err("[%s] Cannot allocate requested page for virtual memory.\n",
__func__);
return VM_FAULT_OOM;
ok_ret_remapped:
if (vmf->vma->vm_mm) {
rcu_read_lock();
// NOTE(review): mm->owner exists only with CONFIG_MEMCG and may be
// NULL while the owner exits — TODO confirm and guard if needed.
struct task_struct *fault_owner = vmf->vma->vm_mm->owner;
pr_info("[%s] Remapped phys: 0x%llx -> virt@PID(%d): 0x%lx.\n",
__func__, _dbg_phys, fault_owner->pid, vmf->address);
rcu_read_unlock();
}
return 0;
ok_ret_allocated:
if (vmf->vma->vm_mm){
rcu_read_lock();
struct task_struct *fault_owner = vmf->vma->vm_mm->owner;
pr_info("[%s] Allocated phys: 0x%llx -> virt@PID(%d): 0x%lx.\n",
__func__, _dbg_phys, fault_owner->pid, vmf->address);
rcu_read_unlock();
}
return 0;
}
/* VMA callbacks: pages are supplied lazily via .fault; .close writes the
 * mapped range back to the PoC.
 */
static const struct vm_operations_struct my_shmem_vmops = {
	.fault = my_shmem_vmops_fault,
	.close = my_shmem_vmops_close,
};
/* File Operations...
 * ============================================================================
 */
/* Forward declaration: my_shmem_fops_open() re-installs this table. */
static const struct file_operations my_shmem_fops;
/* mmap hook: install the fault-driven vm_operations; no pages are
 * populated here — they appear on first touch via my_shmem_vmops_fault().
 * Always succeeds.
 */
static int my_shmem_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
	const unsigned char *fname = file_dentry(filp)->d_name.name;

	vma->vm_ops = &my_shmem_vmops;
	pr_info("[%s] Device file '%s' mmapped for vma: [0x%lx - 0x%lx].\n",
		__func__, fname, vma->vm_start, vma->vm_end);
	return 0;
}
/* open hook: (re)point f_op at our table — redundant after register_chrdev
 * already installed it, but kept for explicitness — and log the open.
 * Always succeeds.
 */
static int my_shmem_fops_open(struct inode *inode, struct file *filp)
{
	const unsigned char *fname = file_dentry(filp)->d_name.name;

	filp->f_op = &my_shmem_fops;
	pr_info("[%s] Device file '%s' opened.\n", __func__, fname);
	return 0;
}
/* release hook: log only — no per-open state is tracked, so nothing is
 * reclaimed here.
 *
 * Garbage collection requires knowing who references which page...
 * Ideally this is stored in filp->private_data but ah well, oversight.
 */
static int my_shmem_fops_release(struct inode *inode, struct file *filp)
{
	const unsigned char *fname = file_dentry(filp)->d_name.name;

	pr_info("[%s] Device file '%s' released.\n", __func__, fname);
	return 0;
}
/* Character-device file operations for /dev/my_shmem. */
static const struct file_operations my_shmem_fops = {
	.owner   = THIS_MODULE,
	.open    = my_shmem_fops_open,
	.release = my_shmem_fops_release,
	.mmap    = my_shmem_fops_mmap,
};
/* Module init & exit...
 * ============================================================================
 */
/* Module init: register the char device, create its class, and create one
 * device node (minor 0). Returns 0 on success or a negative errno;
 * resources are unwound in reverse order on failure.
 */
static int __init my_shmem_init(void)
{
int err;
dev_t dev_nr;
/* Register cdev */
major = register_chrdev(
0, DEV_NAME, &my_shmem_fops);
if (major < 0) {
pr_err("[%s] Cannot register character dev -- error code %d.\n",
__func__, major);
return major;
}
/* Create device class */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
class = class_create(DEV_NAME);
#else
// Re: commit dcfbb67e48a2becfce7990386e985b9c45098ee5 (I think?)
class = class_create(THIS_MODULE, DEV_NAME);
#endif
if (IS_ERR(class)) {
err = (int) PTR_ERR(class);
pr_err("[%s] Cannot create device class -- error code %ld.\n",
__func__, PTR_ERR(class));
goto err_unreg_chrdev;
}
/* Create one device */
dev_nr = MKDEV(major, 0);
dev = device_create(
class, NULL, dev_nr, NULL, DEV_NAME);
if (IS_ERR(dev)) {
err = (int) PTR_ERR(dev);
pr_err("[%s] Cannot create device -- error code %ld.\n",
__func__, PTR_ERR(dev));
goto err_destroy_class;
}
/* Use dev_name(): device_add() clears dev->init_name, so printing
 * init_name here would log "(null)".
 */
pr_info("[%s] Device `%s` built successfully.\n",
__func__, dev_name(dev));
return 0;
err_destroy_class:
class_destroy(class);
err_unreg_chrdev:
unregister_chrdev(major, DEV_NAME);
return err;
}
/* Module exit: tear down the device node, class, and chrdev registration,
 * then free every page still on the shared list.
 *
 * NOTE(review): `vfree(page_to_virt(...))` passes the page's linear-map
 * address, but vfree() requires the address originally returned by
 * vmalloc_user() (the fault handler never stores it) — TODO confirm;
 * fixing this needs the vmalloc VA kept in `struct my_shmem_page`.
 * NOTE(review): `put_page` after `vfree` operates on a page vfree may
 * already have released — ordering looks suspect; verify refcounting.
 */
static void __exit my_shmem_exit(void)
{
/* Destroy device */
device_destroy(class, dev->devt);
class_destroy(class);
unregister_chrdev(major, DEV_NAME);
pr_info("[%s] Device destroyed.\n", __func__);
/* Free all pages -- I'm not compacting in runtime!!! */
struct my_shmem_page *page_entry, *tmp;
mutex_lock(&my_shmem_pages_mtx);
list_for_each_entry_safe(page_entry, tmp, &my_shmem_pages, list) {
vfree(page_to_virt(page_entry->page));
put_page(page_entry->page);
my_shmem_page_count--;
list_del(&page_entry->list);
kfree(page_entry);
}
mutex_unlock(&my_shmem_pages_mtx);
}
/* Register module entry/exit points. */
module_init(my_shmem_init);
module_exit(my_shmem_exit);