diff --git a/src/aarch64-linux-flush-dcache/my_shmem.c b/src/aarch64-linux-flush-dcache/my_shmem.c
index 9f18926..8bb411f 100644
--- a/src/aarch64-linux-flush-dcache/my_shmem.c
+++ b/src/aarch64-linux-flush-dcache/my_shmem.c
@@ -54,17 +54,17 @@ const char* DEV_NAME = "my_shmem";
  */
 static void my_shmem_vmops_close(struct vm_area_struct *vma)
 {
-	size_t nr_pages_in_cache = list_count_nodes(&my_shmem_pages);
+	size_t nr_pages_in_cache = list_count_nodes(&my_shmem_pages);
 	size_t nr_pages_of_vma = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 	pr_info(
-		"[%s] Entered. vma size: %ld; cached pages: %ld.\n",
+		"[%s] Entered. vma size: %ld; cached pages: %ld.\n",
 		__func__, nr_pages_of_vma, nr_pages_in_cache
 	);
 	size_t nr_pages_offset = vma->vm_pgoff;
 
 	struct my_shmem_page *entry;
 	// u64 clean_time_bgn, clean_time_end;
-	u64 runtime;
+	// u64 runtime;
 
 	mutex_lock(&my_shmem_pages_mtx);
 	list_for_each_entry(entry, &my_shmem_pages, list) {
@@ -87,10 +87,10 @@ static void my_shmem_vmops_close(struct vm_area_struct *vma)
 		pr_info("[%s] Before flush: 0x%px has 0x%lx [+%ld].\n",
 			__func__, (void *) kvaddr_bgn, *(ulong *) kvaddr_bgn,
 			sizeof(ulong));
-		__dcache_clean_poc_dbg(kvaddr_bgn, kvaddr_end, &runtime);
-		pr_info("[%s] After flush: 0x%px has 0x%lx [+%ld]. Runtime: %lldns.\n",
+		__dcache_clean_poc(kvaddr_bgn, kvaddr_end);
+		pr_info("[%s] After flush: 0x%px has 0x%lx [+%ld].\n",
 			__func__, (void *) kvaddr_bgn, *(ulong *) kvaddr_bgn,
-			sizeof(ulong), runtime);
+			sizeof(ulong));
 
 		put_page(pg);
 
@@ -155,7 +155,7 @@ static vm_fault_t my_shmem_vmops_fault(struct vm_fault *vmf)
 	// Fill in vmf's page for return
 	get_page(last_pg);
 	vmf->page = last_pg;
-	// ret = vmf_insert_page(vma_of_vmf, vmf->address, last_pg);
+	// ret = vmf_insert_page(vma_of_vmf, vmf->address, last_pg);
 	// [!] YOU DON'T NEED TO CALL REMAP_PFN_RANGE OR FAMILY HERE!!!
 	// `__do_fault` allocates PTE prior to calling `vm_ops->fault`,
 	// and at return `finish_fault` inserts PTE for given page.
@@ -357,8 +357,10 @@ static void __exit my_shmem_exit(void)
 	struct my_shmem_page *page_entry, *tmp;
 	mutex_lock(&my_shmem_pages_mtx);
 	list_for_each_entry_safe(page_entry, tmp, &my_shmem_pages, list) {
-		put_page(page_entry->page);
-		free_page((ulong) page_to_virt(page_entry->page));
+		// put_page(page_entry->page);
+		BUG_ON(atomic_read(&page_entry->page->_refcount) != 1);
+		__free_page(page_entry->page); // no put_page since we don't want double-free
+		// free_page((ulong) page_to_virt(page_entry->page));
 		// my_shmem_page_count--;
 		list_del(&page_entry->list);
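
A note on the last hunk: the old teardown called put_page() and then free_page() on the same page. put_page() on the final reference already frees the page, so the second call was a double-free; the fix keeps exactly one release and asserts that the module's page list holds the sole remaining reference. The sketch below restates that logic; it assumes every get_page() taken in the fault handler has been balanced in vmops_close, and the helper name my_shmem_drop_cached_page is illustrative, not part of the module. page_ref_count() is the upstream accessor that wraps atomic_read(&page->_refcount).

#include <linux/mm.h>
#include <linux/page_ref.h>

/* Hypothetical helper restating the corrected exit-path release. */
static void my_shmem_drop_cached_page(struct page *pg)
{
	/*
	 * Assumes vmops_close balanced every get_page() from the fault
	 * handler, so this list entry holds the only reference left.
	 */
	BUG_ON(page_ref_count(pg) != 1); /* same as atomic_read(&pg->_refcount) */

	/*
	 * Exactly one release: put_page() on the last reference would
	 * also free the page, so the old put_page() + free_page() pair
	 * freed it twice.
	 */
	__free_page(pg);
}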