--- //depot/jhb/superpages/sys/amd64/amd64/pmap.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/amd64/amd64/pmap.c 2008/09/17 11:45:01 @@ -220,7 +220,6 @@ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va); -static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, @@ -433,8 +432,7 @@ if (ndmpdp < 4) /* Minimum 4GB of dirmap */ ndmpdp = 4; DMPDPphys = allocpages(firstaddr, NDMPML4E); - if ((amd_feature & AMDID_PAGE1GB) == 0) - DMPDphys = allocpages(firstaddr, ndmpdp); + DMPDphys = allocpages(firstaddr, ndmpdp); dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; /* Fill in the underlying page table pages */ @@ -460,31 +458,21 @@ /* And connect up the PD to the PDP */ for (i = 0; i < NKPDPE; i++) { - ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + - (i << PAGE_SHIFT); + ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + (i << PAGE_SHIFT); ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U; } - /* Now set up the direct map space using either 2MB or 1GB pages */ - if ((amd_feature & AMDID_PAGE1GB) == 0) { - for (i = 0; i < NPDEPG * ndmpdp; i++) { - ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT; - ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | - PG_G; - } - /* And the direct map space's PDP */ - for (i = 0; i < ndmpdp; i++) { - ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + - (i << PAGE_SHIFT); - ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U; - } - } else { - for (i = 0; i < ndmpdp; i++) { - ((pdp_entry_t *)DMPDPphys)[i] = - (vm_paddr_t)i << PDPSHIFT; - ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS | - PG_G; - } + + /* Now set up the direct map space using 2MB pages */ + for (i = 0; i < NPDEPG * ndmpdp; i++) { + ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT; 
+ ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G; + } + + /* And the direct map space's PDP */ + for (i = 0; i < ndmpdp; i++) { + ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (i << PAGE_SHIFT); + ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U; } /* And recursively map PML4 to itself in order to get PTmap */ @@ -3686,48 +3674,6 @@ } /* - * pmap_page_wired_mappings: - * - * Return the number of managed mappings to the given physical page - * that are wired. - */ -int -pmap_page_wired_mappings(vm_page_t m) -{ - int count; - - count = 0; - if ((m->flags & PG_FICTITIOUS) != 0) - return (count); - count = pmap_pvh_wired_mappings(&m->md, count); - return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count)); -} - -/* - * pmap_pvh_wired_mappings: - * - * Return the updated number "count" of managed mappings that are wired. - */ -static int -pmap_pvh_wired_mappings(struct md_page *pvh, int count) -{ - pmap_t pmap; - pt_entry_t *pte; - pv_entry_t pv; - - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { - pmap = PV_PMAP(pv); - PMAP_LOCK(pmap); - pte = pmap_pte(pmap, pv->pv_va); - if ((*pte & PG_W) != 0) - count++; - PMAP_UNLOCK(pmap); - } - return (count); -} - -/* * Returns TRUE if the given page is mapped individually or as part of * a 2mpage. Otherwise, returns FALSE. */ --- //depot/jhb/superpages/sys/arm/arm/pmap.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/arm/arm/pmap.c 2008/09/17 11:45:01 @@ -4475,27 +4475,6 @@ return (FALSE); } -/* - * pmap_page_wired_mappings: - * - * Return the number of managed mappings to the given physical page - * that are wired. 
- */ -int -pmap_page_wired_mappings(vm_page_t m) -{ - pv_entry_t pv; - int count; - - count = 0; - if ((m->flags & PG_FICTITIOUS) != 0) - return (count); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) - if ((pv->pv_flags & PVF_WIRED) != 0) - count++; - return (count); -} /* * pmap_ts_referenced: --- //depot/jhb/superpages/sys/i386/i386/pmap.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/i386/i386/pmap.c 2008/09/17 11:45:01 @@ -278,7 +278,6 @@ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va); -static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, @@ -3808,50 +3807,6 @@ } /* - * pmap_page_wired_mappings: - * - * Return the number of managed mappings to the given physical page - * that are wired. - */ -int -pmap_page_wired_mappings(vm_page_t m) -{ - int count; - - count = 0; - if ((m->flags & PG_FICTITIOUS) != 0) - return (count); - count = pmap_pvh_wired_mappings(&m->md, count); - return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count)); -} - -/* - * pmap_pvh_wired_mappings: - * - * Return the updated number "count" of managed mappings that are wired. - */ -static int -pmap_pvh_wired_mappings(struct md_page *pvh, int count) -{ - pmap_t pmap; - pt_entry_t *pte; - pv_entry_t pv; - - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - sched_pin(); - TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { - pmap = PV_PMAP(pv); - PMAP_LOCK(pmap); - pte = pmap_pte_quick(pmap, pv->pv_va); - if ((*pte & PG_W) != 0) - count++; - PMAP_UNLOCK(pmap); - } - sched_unpin(); - return (count); -} - -/* * Returns TRUE if the given page is mapped individually or as part of * a 4mpage. Otherwise, returns FALSE. 
*/ --- //depot/jhb/superpages/sys/ia64/ia64/pmap.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/ia64/ia64/pmap.c 2008/09/17 11:45:01 @@ -1881,38 +1881,6 @@ } /* - * pmap_page_wired_mappings: - * - * Return the number of managed mappings to the given physical page - * that are wired. - */ -int -pmap_page_wired_mappings(vm_page_t m) -{ - struct ia64_lpte *pte; - pmap_t oldpmap, pmap; - pv_entry_t pv; - int count; - - count = 0; - if ((m->flags & PG_FICTITIOUS) != 0) - return (count); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { - pmap = pv->pv_pmap; - PMAP_LOCK(pmap); - oldpmap = pmap_switch(pmap); - pte = pmap_find_vhpt(pv->pv_va); - KASSERT(pte != NULL, ("pte")); - if (pmap_wired(pte)) - count++; - pmap_switch(oldpmap); - PMAP_UNLOCK(pmap); - } - return (count); -} - -/* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but --- //depot/jhb/superpages/sys/powerpc/powerpc/mmu_if.m 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/powerpc/powerpc/mmu_if.m 2008/09/17 11:45:01 @@ -431,21 +431,6 @@ /** - * @brief Count the number of managed mappings to the given physical - * page that are wired. 
- * - * @param _pg physical page - * - * @retval int the number of wired, managed mappings to the - * given physical page - */ -METHOD int page_wired_mappings { - mmu_t _mmu; - vm_page_t _pg; -}; - - -/** * @brief Initialise a physical map data structure * * @param _pmap physical map --- //depot/jhb/superpages/sys/powerpc/powerpc/mmu_oea.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/powerpc/powerpc/mmu_oea.c 2008/09/17 11:45:01 @@ -322,7 +322,6 @@ boolean_t moea_ts_referenced(mmu_t, vm_page_t); vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); -int moea_page_wired_mappings(mmu_t, vm_page_t); void moea_pinit(mmu_t, pmap_t); void moea_pinit0(mmu_t, pmap_t); void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); @@ -360,7 +359,6 @@ MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), MMUMETHOD(mmu_map, moea_map), MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), - MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings), MMUMETHOD(mmu_pinit, moea_pinit), MMUMETHOD(mmu_pinit0, moea_pinit0), MMUMETHOD(mmu_protect, moea_protect), @@ -1494,26 +1492,6 @@ return (FALSE); } -/* - * Return the number of managed mappings to the given physical page - * that are wired. 
- */ -int -moea_page_wired_mappings(mmu_t mmu, vm_page_t m) -{ - struct pvo_entry *pvo; - int count; - - count = 0; - if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0) - return (count); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) - if ((pvo->pvo_vaddr & PVO_WIRED) != 0) - count++; - return (count); -} - static u_int moea_vsidcontext; void --- //depot/jhb/superpages/sys/powerpc/powerpc/pmap_dispatch.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/powerpc/powerpc/pmap_dispatch.c 2008/09/17 11:45:01 @@ -194,13 +194,6 @@ } int -pmap_page_wired_mappings(vm_page_t m) -{ - - return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m)); -} - -int pmap_pinit(pmap_t pmap) { MMU_PINIT(mmu_obj, pmap); --- //depot/jhb/superpages/sys/sparc64/sparc64/pmap.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/sparc64/sparc64/pmap.c 2008/09/17 11:45:01 @@ -1759,26 +1759,6 @@ } /* - * Return the number of managed mappings to the given physical page - * that are wired. - */ -int -pmap_page_wired_mappings(vm_page_t m) -{ - struct tte *tp; - int count; - - count = 0; - if ((m->flags & PG_FICTITIOUS) != 0) - return (count); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) - if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED)) - count++; - return (count); -} - -/* * Remove all pages from specified address space, this aids process exit * speeds. This is much faster than pmap_remove n the case of running down * an entire address space. Only works for the current pmap. --- //depot/jhb/superpages/sys/sun4v/sun4v/pmap.c 2008/09/15 19:34:30 +++ //depot/jhb/superpages/sys/sun4v/sun4v/pmap.c 2008/09/17 11:45:01 @@ -1669,34 +1669,6 @@ TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } - -/* - * Return the number of managed mappings to the given physical page - * that are wired. 
- */
-int
-pmap_page_wired_mappings(vm_page_t m)
-{
-	pmap_t pmap;
-	pv_entry_t pv;
-	uint64_t tte_data;
-	int count;
-
-	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
-		return (count);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-		pmap = pv->pv_pmap;
-		PMAP_LOCK(pmap);
-		tte_data = tte_hash_lookup(pmap->pm_hash, pv->pv_va);
-		if ((tte_data & VTD_WIRED) != 0)
-			count++;
-		PMAP_UNLOCK(pmap);
-	}
-	return (count);
-}
-
 /*
  * Lower the permission for all mappings to a given page.
  */
--- //depot/jhb/superpages/sys/vm/pmap.h	2008/09/15 19:34:30
+++ //depot/jhb/superpages/sys/vm/pmap.h	2008/09/17 11:45:01
@@ -116,7 +116,6 @@
 		    vm_object_t object, vm_pindex_t pindex, vm_size_t size);
 boolean_t	 pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
 void		 pmap_page_init(vm_page_t m);
-int		 pmap_page_wired_mappings(vm_page_t m);
 int		 pmap_pinit(pmap_t);
 void		 pmap_pinit0(pmap_t);
 void		 pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
--- //depot/jhb/superpages/sys/vm/vm_object.c	2008/09/15 19:34:30
+++ //depot/jhb/superpages/sys/vm/vm_object.c	2008/09/17 11:45:01
@@ -1868,7 +1868,6 @@
     boolean_t clean_only)
 {
 	vm_page_t p, next;
-	int wirings;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	if (object->resident_page_count == 0)
@@ -1903,21 +1902,10 @@
 	    p = next) {
 		next = TAILQ_NEXT(p, listq);
 
-		/*
-		 * If the page is wired for any reason besides the
-		 * existence of managed, wired mappings, then it cannot
-		 * be freed.  For example, fictitious pages, which
-		 * represent device memory, are inherently wired and
-		 * cannot be freed.  They can, however, be invalidated
-		 * if "clean_only" is FALSE.
-		 */
-		if ((wirings = p->wire_count) != 0 &&
-		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
+		if (p->wire_count != 0) {
 			/* Fictitious pages do not have managed mappings. */
 			if ((p->flags & PG_FICTITIOUS) == 0)
 				pmap_remove_all(p);
-			/* Account for removal of managed, wired mappings. 
*/ - p->wire_count -= wirings; if (!clean_only) p->valid = 0; continue; @@ -1932,9 +1920,6 @@ continue; } pmap_remove_all(p); - /* Account for removal of managed, wired mappings. */ - if (wirings != 0) - p->wire_count -= wirings; vm_page_free(p); } vm_page_unlock_queues();