Fixed again. The idea bombed.

Zhengyi Chen 2024-01-28 06:44:34 +00:00
parent 200739c892
commit 83cee5c2b9


@@ -1,5 +1,6 @@
 // [TODO] Clean up headers...
 #include <linux/device.h>
+#include "asm-generic/errno-base.h"
 #include "asm-generic/memory_model.h"
 #include "asm/page-def.h"
 #include "linux/gfp.h"
@@ -35,7 +36,7 @@ static DEFINE_MUTEX(my_shmem_pages_mtx);
 static LIST_HEAD(my_shmem_pages);
 /* [!] READ/WRITE UNDER LOCK */
-static size_t my_shmem_page_count = 0;
+// static size_t my_shmem_page_count = 0;
 static int major;
 static struct class* class;
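
The counter change above (and its twins in the fault, mmap, and exit hunks below) trades the manually maintained `my_shmem_page_count` for an on-demand `list_count_nodes()` walk under the same mutex, so the count can no longer drift out of sync with the list. A minimal sketch of the pattern, assuming a v6.3+ kernel (where `list_count_nodes()` landed); the helper name is hypothetical:

    #include <linux/list.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_shmem_pages_mtx);
    static LIST_HEAD(my_shmem_pages);

    /* Hypothetical helper: count entries on demand. O(n) in list length,
     * which is fine at this module's page counts. */
    static size_t my_shmem_count_pages(void)
    {
            size_t n;

            mutex_lock(&my_shmem_pages_mtx);
            n = list_count_nodes(&my_shmem_pages);
            mutex_unlock(&my_shmem_pages_mtx);
            return n;
    }

The call sites in this diff already hold the mutex, so they call `list_count_nodes()` directly instead of going through a locking helper like this.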
@@ -45,25 +46,57 @@ const char* DEV_NAME = "my_shmem";
 /* Virtual Memory Area Operations...
  * ============================================================================
  */
+/* [TODO]
+ * This don't work, not bc. __dcache_clean_poc don't work, but bc. the cache for
+ * the *ENTIRE MM* has already been flushed prior to calling this.
+ */
 static void my_shmem_vmops_close(struct vm_area_struct *vma)
 {
-        struct my_shmem_page *curr;
+        pr_info("[%s] Entered.\n", __func__);
+        /* [?]
+         * For some reason, `get_user_pages` on vma always fails with EFAULT.
+         * I feel like this should only be the case if `close` is called only
+         * after PT entries are removed. It looks like so (re: exit_mmap:3322)
+         * but is this the case?
+         *
+         * Or maybe it's `unmap_vmas` which invalidates everything in MMU?
+         */
+        size_t nr_pages_of_vma = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+        size_t nr_pages_offset = vma->vm_pgoff;
+        struct my_shmem_page *entry;
         mutex_lock(&my_shmem_pages_mtx);
-        list_for_each_entry(curr, &my_shmem_pages, list) {
-                struct page *pg = curr->page;
-                ulong vaddr_bgn = (ulong) page_address(pg);
-                ulong vaddr_end = vaddr_bgn + PAGE_SIZE;
-                BUG_ON(!virt_addr_valid((void *) vaddr_bgn));
+        list_for_each_entry(entry, &my_shmem_pages, list) {
+                /* Loop until first page out of offset */
+                if (nr_pages_offset) {
+                        nr_pages_offset--;
+                        continue;
+                }
+                /* Early exit when all of vma is done */
+                if (!nr_pages_of_vma)
+                        break;
+                /* Inside the window of mapped pages -- flush them up */
+                struct page *pg = entry->page;
+                ulong kvaddr_bgn = (ulong) page_address(pg);
+                ulong kvaddr_end = kvaddr_bgn + PAGE_SIZE;
                 pr_info("[%s] Before flush: 0x%px has 0x%lx [+%ld].\n",
-                        __func__, (void *) vaddr_bgn, *(ulong *) vaddr_bgn,
+                        __func__, (void *) kvaddr_bgn, *(ulong *) kvaddr_bgn,
                         sizeof(ulong));
-                __dcache_clean_poc(vaddr_bgn, vaddr_end);
+                __dcache_clean_poc(kvaddr_bgn, kvaddr_end);
+                put_page(pg);
                 pr_info("[%s] After flush: 0x%px has 0x%lx [+%ld].\n",
-                        __func__, (void *) vaddr_bgn, *(ulong *) vaddr_bgn,
+                        __func__, (void *) kvaddr_bgn, *(ulong *) kvaddr_bgn,
                         sizeof(ulong));
+                nr_pages_of_vma--;
         }
         mutex_unlock(&my_shmem_pages_mtx);
         pr_info("[%s] Flushed dcache.\n", __func__);
 }
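
The [?] guess above matches the teardown order in mm/mmap.c: exit_mmap() runs unmap_vmas() (dropping the PTEs) and free_pgtables() before remove_vma() finally invokes each vma's ->close(), so by the time this handler runs there is nothing left for `get_user_pages` to walk, hence the EFAULT. The vm_operations_struct definition sits outside these hunks; a minimal reconstruction, assuming only the two handlers visible in this diff:

    /* Reconstruction, not part of this commit: the handlers above and
     * below, attached in my_shmem_fops_mmap() via vma->vm_ops. */
    static const struct vm_operations_struct my_shmem_vmops = {
            .close = my_shmem_vmops_close, /* at munmap()/exit_mmap() */
            .fault = my_shmem_vmops_fault, /* on first touch of an unmapped page */
    };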
@@ -75,44 +108,17 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
         struct vm_area_struct *vma_of_vmf = vmf->vma;
         ulong nr_pages_from_vm_start =
                 (vmf->address - vma_of_vmf->vm_start) >> PAGE_SHIFT;
-        const pgoff_t _dbg_offset_from_vma = vmf->pgoff;
         phys_addr_t _dbg_phys_of_page;
-        BUG_ON(!vma_of_vmf->vm_mm);
+        struct page *last_pg;
         mutex_lock(&my_shmem_pages_mtx);
-        if (nr_pages_from_vm_start < my_shmem_page_count) {
-                /* Offset in range, return existing page */
-                pr_info("[%s] Found remappable page nr: %lu, offset: %lu. "
-                        "Total pages allocated: %ld (list size: %ld).\n",
-                        __func__, nr_pages_from_vm_start, _dbg_offset_from_vma,
-                        my_shmem_page_count, list_count_nodes(&my_shmem_pages));
-                // Find correct entry
-                struct my_shmem_page *page_entry;
-                list_for_each_entry(page_entry, &my_shmem_pages, list) {
-                        if (!nr_pages_from_vm_start) break;
-                        nr_pages_from_vm_start--;
-                }
-                // Found correct entry, remap to userspace
-                pr_info("[%s] Remapped pfn: %ld, kernel vaddr: %px.\n",
-                        __func__, page_to_pfn(page_entry->page),
-                        page_to_virt(page_entry->page));
-                // get_page(page_entry->page);
-                vmf->page = page_entry->page;
-                _dbg_phys_of_page = page_to_phys(page_entry->page);
-                mutex_unlock(&my_shmem_pages_mtx);
-                goto ok_ret_remapped;
-        }
-        /* Otherwise, allocate the new page(s) */
-        ulong nr_pages_to_alloc =
-                nr_pages_from_vm_start - my_shmem_page_count + 1;
-        pr_info("[%s] Not enough remappable pages, allocating %ld more...\n",
-                __func__, nr_pages_to_alloc);
-        struct page *last_pg;
+        size_t my_shmem_page_count = list_count_nodes(&my_shmem_pages);
+        BUG_ON(nr_pages_from_vm_start < my_shmem_page_count);
+        /* Allocate the new page(s) */
+        ulong nr_pages_to_alloc = nr_pages_from_vm_start - my_shmem_page_count + 1;
+        pr_info("[%s] Page count %ld, offset %ld -- allocating %ld more...\n",
+                __func__, my_shmem_page_count, vmf->pgoff, nr_pages_to_alloc);
         for (; nr_pages_to_alloc > 0; nr_pages_to_alloc--)
         {
                 // Allocate page handle in kernel
@@ -127,22 +133,22 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
                 struct page *curr_pg = alloc_page(GFP_USER);
                 if (!curr_pg) {
                         mutex_unlock(&my_shmem_pages_mtx);
-                        goto err_ret_no_vmem;
+                        goto err_ret_no_page;
                 }
                 pr_info("[%s] Allocated pfn: %ld, kernel vaddr: %px.\n",
                         __func__, page_to_pfn(curr_pg), page_to_virt(curr_pg));
-                get_page(curr_pg);
+                get_page(curr_pg); // For base page refcount
                 new_page->page = curr_pg;
                 // List maintenance: add and incr. size
                 list_add(&new_page->list, &my_shmem_pages);
-                my_shmem_page_count++;
+                // my_shmem_page_count++;
                 // Fill in last_pg for final return from page fault handler
                 last_pg = curr_pg;
         }
         // Fill in vmf's page for return
-        // get_page(last_pg);
+        get_page(last_pg);
         vmf->page = last_pg;
         _dbg_phys_of_page = page_to_phys(last_pg);
@@ -153,11 +159,10 @@ err_ret_no_kmem:
         pr_err("[%s] Cannot allocate `struct my_shmem_page` in kernel memory.\n",
                 __func__);
         return VM_FAULT_OOM;
-err_ret_no_vmem:
+err_ret_no_page:
         pr_err("[%s] Cannot allocate requested page for virtual memory.\n",
                 __func__);
         return VM_FAULT_OOM;
-ok_ret_remapped:
 ok_ret_allocated:
         return 0;
 }
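
On re-enabling `get_page(last_pg)` above: `alloc_page()` hands back one reference, which this module keeps for its list entry, and the `.fault` contract needs a second one, because the core mm consumes the reference on `vmf->page` when it installs the PTE and drops it again at unmap time. A stripped-down sketch of that contract, with a hypothetical handler name:

    static vm_fault_t demo_fault(struct vm_fault *vmf)
    {
            /* alloc_page() returns the page with refcount 1; that
             * reference stays with the driver's own bookkeeping. */
            struct page *pg = alloc_page(GFP_USER | __GFP_ZERO);

            if (!pg)
                    return VM_FAULT_OOM;
            /* Second reference is for the mapping the core mm installs;
             * it is dropped when the PTE is torn down. */
            get_page(pg);
            vmf->page = pg;
            return 0;
    }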
@@ -176,11 +181,66 @@ static const struct file_operations my_shmem_fops;
 static int my_shmem_fops_mmap(struct file *filp, struct vm_area_struct *vma)
 {
+        int ret = 0;
+        ulong uvaddr = vma->vm_start;
+        ulong upgoff = vma->vm_pgoff;
+        struct my_shmem_page *curr;
+        struct page *pg;
+        const unsigned char *fp_name;
         vma->vm_ops = &my_shmem_vmops;
-        pr_info("[%s] Device file '%s' mmapped for vma: [0x%lx - 0x%lx].\n",
-                __func__, file_dentry(filp)->d_name.name, vma->vm_start, vma->vm_end);
-        return 0;
+        /* Remap as much as possible */
+        mutex_lock(&my_shmem_pages_mtx);
+        size_t my_shmem_page_count = list_count_nodes(&my_shmem_pages);
+        if (!my_shmem_page_count) {
+                mutex_unlock(&my_shmem_pages_mtx);
+                goto cleanup_ok;
+        }
+        list_for_each_entry(curr, &my_shmem_pages, list) {
+                /* If userspace virt addr >= vm_end, exit. */
+                if (uvaddr >= vma->vm_end)
+                        break;
+                /* Wait until the vm_pgoff-th page, if exists. */
+                if (upgoff != 0) {
+                        upgoff--;
+                        continue;
+                }
+                /* Exists allocable page, remap */
+                pg = curr->page;
+                get_page(pg);
+                ret = remap_pfn_range(
+                        vma, uvaddr, page_to_pfn(pg), PAGE_SIZE,
+                        vma->vm_page_prot);
+                if (ret) {
+                        mutex_unlock(&my_shmem_pages_mtx);
+                        goto cleanup_err_remap_pfn_failed;
+                }
+                pr_info("[%s] Remapped pfn %ld (kvaddr: 0x%px) -> uvaddr: 0x%px.\n",
+                        __func__, page_to_pfn(pg), page_to_virt(pg), (void *) uvaddr);
+                uvaddr += PAGE_SIZE;
+        }
+        /* May still have unmapped pages, we allocate lazily at fault time. */
+        mutex_unlock(&my_shmem_pages_mtx);
+        goto cleanup_ok;
+cleanup_err_remap_pfn_failed:
+        pr_err("[%s] Cannot remap pfn %ld (kvaddr: 0x%px) -> uvaddr: 0x%px: %d.\n",
+                __func__, page_to_pfn(pg), page_to_virt(pg), (void *) uvaddr, ret);
+        put_page(pg);
+        goto exit;
+cleanup_ok:
+        fp_name = file_dentry(filp)->d_name.name;
+        if (uvaddr == vma->vm_end)
+                pr_info("[%s] Device file '%s' mmapped for vma: [0x%lx - 0x%lx).\n",
+                        __func__, fp_name, vma->vm_start, vma->vm_end);
+        else
+                pr_info("[%s] Device file '%s' mmapped for vma: [0x%lx - 0x%lx..0x%lx).\n",
+                        __func__, fp_name, vma->vm_start, uvaddr, vma->vm_end);
+exit:
+        return ret;
 }
 static int my_shmem_fops_open(struct inode *inode, struct file *filp)
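
One caveat on the eager-remap path above, perhaps related to the "bombed" verdict in the commit message: remap_pfn_range() flags the vma VM_IO | VM_PFNMAP, and the core mm handles faults in PFN-mapped vmas differently from the page-returning `.fault` handler this module installs for the lazy tail. For driver-owned struct pages, `vm_insert_page()` is the usual page-by-page alternative that keeps the vma an ordinary mapping; a hedged sketch under that assumption (helper name hypothetical):

    /* Hypothetical alternative to the remap_pfn_range() call above:
     * vm_insert_page() maps one driver-allocated struct page and takes
     * its own page reference, without flipping the vma to VM_PFNMAP. */
    static int demo_map_one(struct vm_area_struct *vma, unsigned long uvaddr,
                            struct page *pg)
    {
            return vm_insert_page(vma, uvaddr, pg);
    }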
@@ -203,6 +263,8 @@ static int my_shmem_fops_release(struct inode *inode, struct file *filp)
         return 0;
 }
+
+// static int
 static const struct file_operations my_shmem_fops = {
         .owner = THIS_MODULE,
         .open = my_shmem_fops_open,
@@ -276,7 +338,7 @@ static void __exit my_shmem_exit(void)
                 put_page(page_entry->page);
                 free_page((ulong) page_to_virt(page_entry->page));
-                my_shmem_page_count--;
+                // my_shmem_page_count--;
                 list_del(&page_entry->list);
                 kfree(page_entry);
         }
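
For completeness, the userspace side that exercises this driver is not part of the commit; a minimal sketch, assuming the device node appears as /dev/my_shmem (per DEV_NAME) and a 4 KiB PAGE_SIZE:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/my_shmem", O_RDWR); /* path is an assumption */
            if (fd < 0) { perror("open"); return 1; }

            /* Pages already on the list are remapped eagerly in mmap();
             * anything beyond is faulted in lazily on first touch. */
            char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            strcpy(p, "hello"); /* page 0 */
            p[4096] = 1;        /* page 1: lazy fault path */

            munmap(p, 2 * 4096); /* drives my_shmem_vmops_close() */
            close(fd);
            return 0;
    }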