--- //depot/vendor/freebsd/src/sys/vm/vm_page.c	2012-05-22 07:05:26.000000000 0000
+++ //depot/projects/fadvise/sys/vm/vm_page.c	2012-06-18 19:50:53.000000000 0000
@@ -1187,6 +1187,17 @@
 	m->flags ^= PG_CACHED | PG_FREE;
 	KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
 	    ("vm_page_cache_free: page %p has inconsistent flags", m));
+#if 1
+	if (vm_phys_unfree_page(m)) {
+		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
+		vm_phys_free_pages(m, 0);
+#if VM_NRESERVLEVEL > 0
+	} else if (!vm_reserv_cache_free(m))
+#else
+	} else
+#endif
+		panic("cache page is not free");
+#endif
 	cnt.v_cache_count--;
 	cnt.v_free_count++;
 }
--- //depot/vendor/freebsd/src/sys/vm/vm_phys.c	2012-05-12 20:45:15.000000000 0000
+++ //depot/projects/fadvise/sys/vm/vm_phys.c	2012-06-18 19:50:53.000000000 0000
@@ -36,6 +36,8 @@
  * virtual memory system.
  */
 
+#define CACHE_SEPARATE
+
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD: src/sys/vm/vm_phys.c,v 1.28 2012/05/12 20:42:56 kib Exp $");
 
@@ -128,6 +130,19 @@
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
     int order);
 
+/* Counters of cache pages that are moved to another free pool. */
+static int vm_phys_uncached;
+SYSCTL_INT(_vm, OID_AUTO, phys_uncached, CTLFLAG_RD, &vm_phys_uncached, 0, "");
+static int vm_phys_uc_alloc_pages;
+SYSCTL_INT(_vm, OID_AUTO, phys_uc_alloc_pages, CTLFLAG_RD,
+    &vm_phys_uc_alloc_pages, 0, "");
+static int vm_phys_uc_alloc_pages2;
+SYSCTL_INT(_vm, OID_AUTO, phys_uc_alloc_pages2, CTLFLAG_RD,
+    &vm_phys_uc_alloc_pages2, 0, "");
+static int vm_phys_uc_free_pages;
+SYSCTL_INT(_vm, OID_AUTO, phys_uc_free_pages, CTLFLAG_RD,
+    &vm_phys_uc_free_pages, 0, "");
+
 /*
  * Outputs the state of the physical memory allocator, specifically,
  * the amount of physical memory in each free list.
@@ -455,6 +470,11 @@
 	struct vm_freelist *alt;
 	int domain, oind, pind;
 	vm_page_t m;
+#ifdef CACHE_SEPARATE
+	struct vm_phys_seg *seg;
+	vm_paddr_t pa;
+	vm_page_t m_next, m_buddy;
+#endif
 
 	KASSERT(flind < VM_NFREELIST,
 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
@@ -495,12 +515,83 @@
 				TAILQ_REMOVE(&alt[oind].pl, m, pageq);
 				alt[oind].lcnt--;
 				m->order = VM_NFREEORDER;
+				if (m->pool == VM_FREEPOOL_CACHE &&
+				    pool != VM_FREEPOOL_CACHE)
+					vm_phys_uc_alloc_pages++;
 				vm_phys_set_pool(pool, m, oind);
 				vm_phys_split_pages(m, oind, fl, order);
 				return (m);
 			}
 		}
 	}
+
+#ifdef CACHE_SEPARATE
+	/*
+	 * XXX: If we get here, do deferred merging of cache pages
+	 * with pages from another pool to satisfy the request and
+	 * try again.  This may be quite hard to do, and certainly
+	 * not very efficient.
+	 */
+	for (oind = order - 1; oind > 0; oind--) {
+		alt = (*vm_phys_lookup_lists[domain][flind])[VM_FREEPOOL_CACHE];
+		TAILQ_FOREACH_SAFE(m, &alt[oind].pl, pageq, m_next) {
+			struct vm_freelist *fl2;
+			int newoind;
+
+			/* Detach 'm' from its free list before merging. */
+			TAILQ_REMOVE(&alt[oind].pl, m, pageq);
+			alt[oind].lcnt--;
+			m->order = VM_NFREEORDER;
+			seg = &vm_phys_segs[m->segind];
+			pa = VM_PAGE_TO_PHYS(m);
+			newoind = oind;
+			do {
+				pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + newoind));
+				if (pa < seg->start || pa >= seg->end)
+					break;
+				m_buddy = &seg->first_page[atop(pa - seg->start)];
+				if (m_buddy->order != newoind)
+					break;
+				fl2 = (*seg->free_queues)[m_buddy->pool];
+				/*
+				 * Two same-sized buddies should not
+				 * be on the 'alt[oind].pl' list.
+				 */
+				KASSERT(m_buddy != m_next,
+				    ("identical buddies"));
+				TAILQ_REMOVE(&fl2[newoind].pl, m_buddy, pageq);
+				fl2[newoind].lcnt--;
+				m_buddy->order = VM_NFREEORDER;
+				if (m_buddy->pool != m->pool) {
+					if (m->pool == VM_FREEPOOL_CACHE) {
+						vm_phys_uc_alloc_pages2++;
+						vm_phys_set_pool(m_buddy->pool,
+						    m, newoind);
+					} else {
+						if (m_buddy->pool ==
+						    VM_FREEPOOL_CACHE)
+							vm_phys_uc_alloc_pages2++;
+						vm_phys_set_pool(m->pool,
+						    m_buddy, newoind);
+					}
+				}
+				newoind++;
+				pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + newoind)) - 1);
+				m = &seg->first_page[atop(pa - seg->start)];
+			} while (newoind < order);
+			if (newoind == order) {
+				if (m->pool != pool)
+					vm_phys_set_pool(pool, m, order);
+				return (m);
+			}
+			m->order = newoind;
+			fl2 = (*seg->free_queues)[m->pool];
+			/* Head insertion keeps 'm' behind the iterator. */
+			TAILQ_INSERT_HEAD(&fl2[newoind].pl, m, pageq);
+			fl2[newoind].lcnt++;
+		}
+	}
+#endif
 	return (NULL);
 }
 
@@ -677,12 +768,35 @@
 		m_buddy = &seg->first_page[atop(pa - seg->start)];
 		if (m_buddy->order != order)
 			break;
+#ifdef CACHE_SEPARATE
+		if (m_buddy->pool != m->pool &&
+		    (m_buddy->pool == VM_FREEPOOL_CACHE ||
+		    m->pool == VM_FREEPOOL_CACHE))
+			break;
+#endif
 		fl = (*seg->free_queues)[m_buddy->pool];
 		TAILQ_REMOVE(&fl[order].pl, m_buddy, pageq);
 		fl[order].lcnt--;
 		m_buddy->order = VM_NFREEORDER;
-		if (m_buddy->pool != m->pool)
+		if (m_buddy->pool != m->pool) {
+#if 1
+			if (m_buddy->pool == VM_FREEPOOL_CACHE)
+				vm_phys_uc_free_pages++;
 			vm_phys_set_pool(m->pool, m_buddy, order);
+#else
+			if (m_buddy->pool < m->pool) {
+				if (m_buddy->pool == VM_FREEPOOL_CACHE)
+					vm_phys_uc_free_pages++;
+				vm_phys_set_pool(m->pool, m_buddy,
+				    order);
+			} else {
+				if (m->pool == VM_FREEPOOL_CACHE)
+					vm_phys_uc_free_pages++;
+				vm_phys_set_pool(m_buddy->pool, m,
+				    order);
+			}
+#endif
+		}
 		order++;
 		pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
 		m = &seg->first_page[atop(pa - seg->start)];
@@ -743,8 +857,12 @@
 {
 	vm_page_t m_tmp;
 
-	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
+	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++) {
+		if (m_tmp->pool == VM_FREEPOOL_CACHE &&
+		    pool != VM_FREEPOOL_CACHE)
+			vm_phys_uncached++;
 		m_tmp->pool = pool;
+	}
 }
 
 /*
--- //depot/vendor/freebsd/src/sys/vm/vm_reserv.c	2012-04-08 17:05:18.000000000 0000
+++ //depot/projects/fadvise/sys/vm/vm_reserv.c	2012-06-18 19:50:53.000000000 0000
@@ -640,6 +640,24 @@
 }
 
 /*
+ * Called when a cached page is moved to the free lists.  Returns TRUE
+ * if the page belongs to a reservation, and FALSE otherwise.
+ *
+ * The free page queue lock must be held.
+ */
+boolean_t
+vm_reserv_cache_free(vm_page_t m)
+{
+	vm_reserv_t rv;
+
+	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+	rv = vm_reserv_from_page(m);
+	if (rv->object == NULL)
+		return (FALSE);
+	return (TRUE);
+}
+
+/*
  * Frees the given page if it belongs to a reservation.  Returns TRUE if the
  * page is freed and FALSE otherwise.
  *
--- //depot/vendor/freebsd/src/sys/vm/vm_reserv.h	2011-12-05 18:30:20.000000000 0000
+++ //depot/projects/fadvise/sys/vm/vm_reserv.h	2012-06-18 19:50:53.000000000 0000
@@ -50,6 +50,7 @@
 	    u_long alignment, vm_paddr_t boundary);
 vm_page_t vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex);
 void vm_reserv_break_all(vm_object_t object);
+boolean_t vm_reserv_cache_free(vm_page_t m);
 boolean_t vm_reserv_free_page(vm_page_t m);
 void vm_reserv_init(void);
 int vm_reserv_level_iffullpop(vm_page_t m);
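
For readers following the arithmetic in the deferred-merge loop and in vm_phys_free_pages() above: the buddy allocator finds a block's buddy by toggling a single bit of its physical address, and finds the base of a merged block by masking off the low bits. The standalone sketch below illustrates those two steps outside the kernel. It is not part of the patch; the helper names buddy_of() and block_base() are made up for illustration, and PAGE_SHIFT is assumed to be 12 (4 KB pages).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* Assumed 4 KB pages. */

typedef uint64_t vm_paddr_t;

/*
 * The buddy of an order-'order' block differs from it in exactly one
 * physical address bit, bit (PAGE_SHIFT + order).  Toggling that bit
 * is the "pa ^= ..." step in the patch.
 */
static vm_paddr_t
buddy_of(vm_paddr_t pa, int order)
{
	return (pa ^ ((vm_paddr_t)1 << (PAGE_SHIFT + order)));
}

/*
 * When two order-'order' buddies merge, the base of the resulting
 * order-'order + 1' block is the address with the low bits cleared.
 * This is the patch's "pa &= ~(...)" step, which increments the
 * order before applying the mask.
 */
static vm_paddr_t
block_base(vm_paddr_t pa, int order)
{
	return (pa & ~(((vm_paddr_t)1 << (PAGE_SHIFT + order + 1)) - 1));
}

int
main(void)
{
	vm_paddr_t pa = 0x5000;	/* Page 5: its order-0 buddy is page 4. */

	assert(buddy_of(pa, 0) == 0x4000);
	assert(block_base(pa, 0) == 0x4000);
	/* An order-1 block at 0x4000 has its buddy at 0x6000. */
	assert(buddy_of(0x4000, 1) == 0x6000);
	printf("buddy checks passed\n");
	return (0);
}

vm_phys_free_pages() repeats these two steps until no free buddy is available at the current order; the CACHE_SEPARATE check added above cuts that loop short whenever the merge would cross between the cache pool and another pool, which is exactly the coalescing that the new deferred-merge fallback later performs on demand.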