Fixed again. The idea is bombed
This commit is contained in:
parent 200739c892
commit 83cee5c2b9
1 changed file with 116 additions and 54 deletions
@@ -1,5 +1,6 @@
// [TODO] Clean up headers...
#include <linux/device.h>
#include "asm-generic/errno-base.h"
#include "asm-generic/memory_model.h"
#include "asm/page-def.h"
#include "linux/gfp.h"
@@ -35,7 +36,7 @@ static DEFINE_MUTEX(my_shmem_pages_mtx);
static LIST_HEAD(my_shmem_pages);

/* [!] READ/WRITE UNDER LOCK */
-static size_t my_shmem_page_count = 0;
+// static size_t my_shmem_page_count = 0;

static int major;
static struct class* class;
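With the global counter commented out, the count is derived from the list itself wherever it is needed. A minimal sketch of that pattern (the helper name is hypothetical); `list_count_nodes()` walks the list, so it is only valid under the mutex that guards it:

/* Sketch: compute the count on demand instead of maintaining a shadow
 * counter. The caller must hold my_shmem_pages_mtx. */
static size_t my_shmem_count_pages_locked(void)
{
        lockdep_assert_held(&my_shmem_pages_mtx);
        return list_count_nodes(&my_shmem_pages);
}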
@@ -45,25 +46,57 @@ const char* DEV_NAME = "my_shmem";
/* Virtual Memory Area Operations...
 * ============================================================================
 */

+/* [TODO]
+ * This doesn't work -- not because `__dcache_clean_poc` doesn't work, but
+ * because the cache for the *ENTIRE MM* has already been flushed prior to
+ * calling this.
+ */
static void my_shmem_vmops_close(struct vm_area_struct *vma)
{
        struct my_shmem_page *curr;
        pr_info("[%s] Entered.\n", __func__);

+       /* [?]
+        * For some reason, `get_user_pages` on the vma always fails with
+        * EFAULT. I feel like this should only be the case if `close` is
+        * called after the PT entries are removed. It looks like it is
+        * (re: exit_mmap:3322), but is that actually the case?
+        *
+        * Or maybe it's `unmap_vmas` that invalidates everything in the MMU?
+        */
+       size_t nr_pages_of_vma = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       size_t nr_pages_offset = vma->vm_pgoff;
+       struct my_shmem_page *entry;

        mutex_lock(&my_shmem_pages_mtx);
-       list_for_each_entry(curr, &my_shmem_pages, list) {
-               struct page *pg = curr->page;
-               ulong vaddr_bgn = (ulong) page_address(pg);
-               ulong vaddr_end = vaddr_bgn + PAGE_SIZE;
-               BUG_ON(!virt_addr_valid((void *) vaddr_bgn));
+       list_for_each_entry(entry, &my_shmem_pages, list) {
+               /* Skip until the first page past the offset */
+               if (nr_pages_offset) {
+                       nr_pages_offset--;
+                       continue;
+               }
+
+               /* Early exit once all of the vma is done */
+               if (!nr_pages_of_vma)
+                       break;
+
+               /* Inside the window of mapped pages -- flush them */
+               struct page *pg = entry->page;
+               ulong kvaddr_bgn = (ulong) page_address(pg);
+               ulong kvaddr_end = kvaddr_bgn + PAGE_SIZE;

                pr_info("[%s] Before flush: 0x%px has 0x%lx [+%ld].\n",
-                       __func__, (void *) vaddr_bgn, *(ulong *) vaddr_bgn,
+                       __func__, (void *) kvaddr_bgn, *(ulong *) kvaddr_bgn,
                        sizeof(ulong));
-               __dcache_clean_poc(vaddr_bgn, vaddr_end);
+               __dcache_clean_poc(kvaddr_bgn, kvaddr_end);
                put_page(pg);
                pr_info("[%s] After flush: 0x%px has 0x%lx [+%ld].\n",
-                       __func__, (void *) vaddr_bgn, *(ulong *) vaddr_bgn,
+                       __func__, (void *) kvaddr_bgn, *(ulong *) kvaddr_bgn,
                        sizeof(ulong));
+
+               nr_pages_of_vma--;
        }
        mutex_unlock(&my_shmem_pages_mtx);

        pr_info("[%s] Flushed dcache.\n", __func__);
}
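(The `struct vm_operations_struct` these handlers belong to is not shown in this diff. Presumably it is wired up roughly as below -- a sketch with only the two callbacks that actually appear in this commit:)

/* Sketch: the ops table that mmap() installs further down. Only .close
 * and .fault appear in this diff, so other callbacks are assumed absent. */
static const struct vm_operations_struct my_shmem_vmops = {
        .close = my_shmem_vmops_close,
        .fault = my_shmem_vmops_fault,
};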
@@ -75,44 +108,17 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma_of_vmf = vmf->vma;
        ulong nr_pages_from_vm_start =
                (vmf->address - vma_of_vmf->vm_start) >> PAGE_SHIFT;
        const pgoff_t _dbg_offset_from_vma = vmf->pgoff;
        phys_addr_t _dbg_phys_of_page;

        BUG_ON(!vma_of_vmf->vm_mm);
+       struct page *last_pg;

        mutex_lock(&my_shmem_pages_mtx);
-       if (nr_pages_from_vm_start < my_shmem_page_count) {
-               /* Offset in range, return the existing page */
-               pr_info("[%s] Found remappable page nr: %lu, offset: %lu. "
-                       "Total pages allocated: %ld (list size: %ld).\n",
-                       __func__, nr_pages_from_vm_start, _dbg_offset_from_vma,
-                       my_shmem_page_count, list_count_nodes(&my_shmem_pages));
-               size_t my_shmem_page_count = list_count_nodes(&my_shmem_pages);
-               BUG_ON(nr_pages_from_vm_start < my_shmem_page_count);
-
-               // Find the correct entry
-               struct my_shmem_page *page_entry;
-               list_for_each_entry(page_entry, &my_shmem_pages, list) {
-                       if (!nr_pages_from_vm_start) break;
-                       nr_pages_from_vm_start--;
-               }
-
-               // Found the correct entry, remap it to userspace
-               pr_info("[%s] Remapped pfn: %ld, kernel vaddr: %px.\n",
-                       __func__, page_to_pfn(page_entry->page),
-                       page_to_virt(page_entry->page));
-               // get_page(page_entry->page);
-               vmf->page = page_entry->page;
-               _dbg_phys_of_page = page_to_phys(page_entry->page);
-
-               mutex_unlock(&my_shmem_pages_mtx);
-               goto ok_ret_remapped;
-       }
-
-       /* Otherwise, allocate the new page(s) */
-       ulong nr_pages_to_alloc =
-               nr_pages_from_vm_start - my_shmem_page_count + 1;
-       pr_info("[%s] Not enough remappable pages, allocating %ld more...\n",
-               __func__, nr_pages_to_alloc);
-       struct page *last_pg;
+       /* Allocate the new page(s) */
+       ulong nr_pages_to_alloc = nr_pages_from_vm_start - my_shmem_page_count + 1;
+       pr_info("[%s] Page count %ld, offset %ld -- allocating %ld more...\n",
+               __func__, my_shmem_page_count, vmf->pgoff, nr_pages_to_alloc);
        for (; nr_pages_to_alloc > 0; nr_pages_to_alloc--)
        {
                // Allocate the page handle in the kernel
@@ -127,22 +133,22 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
                struct page *curr_pg = alloc_page(GFP_USER);
                if (!curr_pg) {
                        mutex_unlock(&my_shmem_pages_mtx);
-                       goto err_ret_no_vmem;
+                       goto err_ret_no_page;
                }
                pr_info("[%s] Allocated pfn: %ld, kernel vaddr: %px.\n",
                        __func__, page_to_pfn(curr_pg), page_to_virt(curr_pg));
-               get_page(curr_pg);
+               get_page(curr_pg); // For the base page refcount
                new_page->page = curr_pg;

                // List maintenance: add the entry and increment the size
                list_add(&new_page->list, &my_shmem_pages);
-               my_shmem_page_count++;
+               // my_shmem_page_count++;

                // Fill in last_pg for the final return from the fault handler
                last_pg = curr_pg;
        }
        // Fill in vmf's page for the return
-       // get_page(last_pg);
+       get_page(last_pg);
        vmf->page = last_pg;
        _dbg_phys_of_page = page_to_phys(last_pg);
@@ -153,11 +159,10 @@ err_ret_no_kmem:
        pr_err("[%s] Cannot allocate `struct my_shmem_page` in kernel memory.\n",
                __func__);
        return VM_FAULT_OOM;
-err_ret_no_vmem:
+err_ret_no_page:
        pr_err("[%s] Cannot allocate requested page for virtual memory.\n",
                __func__);
        return VM_FAULT_OOM;
-ok_ret_remapped:
ok_ret_allocated:
        return 0;
}
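The `get_page(last_pg)` right before `vmf->page = last_pg;` follows the usual fault-handler contract: the handler hands back the page with one extra reference, and that reference is transferred to the new user mapping rather than dropped by the caller. A minimal sketch of just that contract, with illustrative names:

/* Sketch: the minimal .fault shape -- hand the mm core one page
 * reference via vmf->page; it is consumed by the new PTE mapping. */
static vm_fault_t minimal_fault(struct vm_fault *vmf)
{
        struct page *pg = alloc_page(GFP_USER); /* refcount == 1 */

        if (!pg)
                return VM_FAULT_OOM;
        vmf->page = pg; /* the allocation's reference goes to the mapping */
        return 0;
}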
@@ -176,11 +181,66 @@ static const struct file_operations my_shmem_fops;

static int my_shmem_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
+       int ret = 0;
+       ulong uvaddr = vma->vm_start;
+       ulong upgoff = vma->vm_pgoff;
+       struct my_shmem_page *curr;
+       struct page *pg;
+       const unsigned char *fp_name;
+
        vma->vm_ops = &my_shmem_vmops;

-       pr_info("[%s] Device file '%s' mmapped for vma: [0x%lx - 0x%lx].\n",
-               __func__, file_dentry(filp)->d_name.name, vma->vm_start, vma->vm_end);
-       return 0;
+       /* Remap as much as possible */
+       mutex_lock(&my_shmem_pages_mtx);
+       size_t my_shmem_page_count = list_count_nodes(&my_shmem_pages);
+       if (!my_shmem_page_count) {
+               mutex_unlock(&my_shmem_pages_mtx);
+               goto cleanup_ok;
+       }
+       list_for_each_entry(curr, &my_shmem_pages, list) {
+               /* If the userspace virt addr >= vm_end, exit. */
+               if (uvaddr >= vma->vm_end)
+                       break;
+
+               /* Wait until the vm_pgoff-th page, if it exists. */
+               if (upgoff != 0) {
+                       upgoff--;
+                       continue;
+               }
+
+               /* An allocatable page exists -- remap it */
+               pg = curr->page;
+               get_page(pg);
+               ret = remap_pfn_range(
+                       vma, uvaddr, page_to_pfn(pg), PAGE_SIZE,
+                       vma->vm_page_prot);
+               if (ret) {
+                       mutex_unlock(&my_shmem_pages_mtx);
+                       goto cleanup_err_remap_pfn_failed;
+               }
+               pr_info("[%s] Remapped pfn %ld (kvaddr: 0x%px) -> uvaddr: 0x%px.\n",
+                       __func__, page_to_pfn(pg), page_to_virt(pg), (void *) uvaddr);
+               uvaddr += PAGE_SIZE;
+       }
+       /* We may still have unmapped pages; those are allocated lazily at fault time. */
+       mutex_unlock(&my_shmem_pages_mtx);
+       goto cleanup_ok;
+
+cleanup_err_remap_pfn_failed:
+       pr_err("[%s] Cannot remap pfn %ld (kvaddr: 0x%px) -> uvaddr: 0x%px: %d.\n",
+               __func__, page_to_pfn(pg), page_to_virt(pg), (void *) uvaddr, ret);
+       put_page(pg);
+       goto exit;
+cleanup_ok:
+       fp_name = file_dentry(filp)->d_name.name;
+       if (uvaddr == vma->vm_end)
+               pr_info("[%s] Device file '%s' mmapped for vma: [0x%lx - 0x%lx).\n",
+                       __func__, fp_name, vma->vm_start, vma->vm_end);
+       else
+               pr_info("[%s] Device file '%s' mmapped for vma: [0x%lx - 0x%lx..0x%lx).\n",
+                       __func__, fp_name, vma->vm_start, uvaddr, vma->vm_end);
+exit:
+       return ret;
}

static int my_shmem_fops_open(struct inode *inode, struct file *filp)
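For exercising the mmap path above from userspace, a test along these lines would hit both the eager `remap_pfn_range` mappings and the lazy fault path (the `/dev/my_shmem` path is assumed from `DEV_NAME`):

/* Userspace sketch: map two pages of the device and touch both, forcing
 * the fault handler for anything mmap() did not remap eagerly. */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/my_shmem", O_RDWR);
        char *p;

        if (fd < 0)
                return 1;
        p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "hello");        /* page 0: possibly premapped by mmap() */
        strcpy(p + 4096, "world"); /* page 1: faults in lazily */

        munmap(p, 2 * 4096);
        close(fd);
        return 0;
}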
@@ -203,6 +263,8 @@ static int my_shmem_fops_release(struct inode *inode, struct file *filp)
        return 0;
}

+// static int
+
static const struct file_operations my_shmem_fops = {
        .owner = THIS_MODULE,
        .open = my_shmem_fops_open,
@@ -276,7 +338,7 @@ static void __exit my_shmem_exit(void)
                put_page(page_entry->page);
                free_page((ulong) page_to_virt(page_entry->page));

-               my_shmem_page_count--;
+               // my_shmem_page_count--;
                list_del(&page_entry->list);
                kfree(page_entry);
        }
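The `list_del()` inside this loop is only safe with the `_safe` list iterator, which the hunk does not show. Assuming the surrounding loop is `list_for_each_entry_safe`, the teardown shape is roughly:

/* Sketch: unlinking entries while iterating needs the _safe variant,
 * which prefetches the next node before the current one is freed.
 * put_page() on the final reference already frees the page itself. */
struct my_shmem_page *entry, *tmp;

list_for_each_entry_safe(entry, tmp, &my_shmem_pages, list) {
        put_page(entry->page);
        list_del(&entry->list);
        kfree(entry);
}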