Date:      Tue, 27 Dec 2016 21:50:32 -0800
From:      Adrian Chadd <adrian.chadd@gmail.com>
To:        Alexander Kabaev <kan@freebsd.org>
Cc:        "src-committers@freebsd.org" <src-committers@freebsd.org>,  "svn-src-all@freebsd.org" <svn-src-all@freebsd.org>,  "svn-src-head@freebsd.org" <svn-src-head@freebsd.org>
Subject:   Re: svn commit: r310650 - in head/sys/mips: include mips
Message-ID:  <CAJ-Vmo=bMUNbfgaTdUC8ToN9yLZuyqN5ws79vmwP8OJcRfR1ag@mail.gmail.com>
In-Reply-To: <201612280255.uBS2tQeR045512@repo.freebsd.org>
References:  <201612280255.uBS2tQeR045512@repo.freebsd.org>

hiya,

So I dug into the MIPS 24K definition of this. It says:

"
3.4.3 Uncached accelerated writes
The 24K core permits memory regions to be marked as =E2=80=9Cuncached
accelerated=E2=80=9D. This type of region is useful to hard-
ware which is =E2=80=9Cwrite only=E2=80=9D - perhaps video frame buffers, o=
r some
other hardware stream. Sequential word stores in
such regions are gathered into cache-line-sized chunks, before being
written with a single burst cycle on the CPU
interface.
Such regions are uncached for read, and partial-word or
out-of-sequence writes have =E2=80=9Cunpredictable=E2=80=9D effects - don=
=E2=80=99t
do them. The burst write is normally performed when software writes to
the last location in the memory block or does
an uncached-accelerated write to some other block; but it can also be
triggered by a
sync instruction, a pref nudge, a matching load or any exception. If
the block is not completely written by the time it=E2=80=99s pushed out, it
will be written using a series of doubleword or smaller write cycles
over the 24K core=E2=80=99s 64-bit memory interface.
"

So, the question is: is our write-combining page attribute in the VM
suitable for this? Is it defined as "only do full, sequential word
writes"? Or do we risk some other platform using it more loosely than
that, leaving MIPS as the one at fault again? :)


-adrian



On 27 December 2016 at 18:55, Alexander Kabaev <kan@freebsd.org> wrote:
> Author: kan
> Date: Wed Dec 28 02:55:26 2016
> New Revision: 310650
> URL: https://svnweb.freebsd.org/changeset/base/310650
>
> Log:
>   Implement pmap_change_attr and related APIs on MIPS
>
>   On platforms that have an uncached-accelerated cache attribute, map it
>   to VM_MEMATTR_WRITE_COMBINING. Otherwise, leave write combining
>   undefined.
>
>   Reviewed by:  adrian, jhb (glance)
>   Differential Revision:        https://reviews.freebsd.org/D8894
>
> Modified:
>   head/sys/mips/include/cpuregs.h
>   head/sys/mips/include/pmap.h
>   head/sys/mips/include/pte.h
>   head/sys/mips/include/vm.h
>   head/sys/mips/mips/pmap.c
>
> Modified: head/sys/mips/include/cpuregs.h
> ===================================================================
> --- head/sys/mips/include/cpuregs.h     Tue Dec 27 23:56:46 2016        (r310649)
> +++ head/sys/mips/include/cpuregs.h     Wed Dec 28 02:55:26 2016        (r310650)
> @@ -171,6 +171,10 @@
>  #define        MIPS_CCA_CACHED         MIPS_CCA_CCS
>  #endif
>
> +#if defined(CPU_XBURST)
> +#define        MIPS_CCA_UA             0x01
> +#endif
> +
>  #ifndef        MIPS_CCA_UNCACHED
>  #define        MIPS_CCA_UNCACHED       MIPS_CCA_UC
>  #endif
> @@ -188,6 +192,16 @@
>  #endif
>  #endif
>
> +/*
> + * Use uncached-accelerated mode for write-combining maps, if one is defined,
> + * otherwise fall back to uncached
> + */
> +#ifndef MIPS_CCA_WC
> +#ifdef MIPS_CCA_UA
> +#define        MIPS_CCA_WC MIPS_CCA_UA
> +#endif
> +#endif
> +
>  #define        MIPS_PHYS_TO_XKPHYS(cca,x) \
>         ((0x2ULL << 62) | ((unsigned long long)(cca) << 59) | (x))
>  #define        MIPS_PHYS_TO_XKPHYS_CACHED(x) \
>
> Modified: head/sys/mips/include/pmap.h
> ===================================================================
> --- head/sys/mips/include/pmap.h        Tue Dec 27 23:56:46 2016        (r310649)
> +++ head/sys/mips/include/pmap.h        Wed Dec 28 02:55:26 2016        (r310650)
> @@ -74,7 +74,8 @@ struct md_page {
>  };
>
>  #define        PV_TABLE_REF            0x02    /* referenced */
> -#define        PV_MEMATTR_UNCACHEABLE  0x04
> +#define        PV_MEMATTR_MASK         0xf0    /* store vm_memattr_t here */
> +#define        PV_MEMATTR_SHIFT        0x04
>
>  #define        ASID_BITS               8
>  #define        ASIDGEN_BITS            (32 - ASID_BITS)
> @@ -163,22 +164,24 @@ extern vm_offset_t virtual_end;
>
>  extern vm_paddr_t dump_avail[PHYS_AVAIL_ENTRIES + 2];
>
> -#define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
> +#define        pmap_page_get_memattr(m) (((m)->md.pv_flags & PV_MEMATTR_MASK) >> PV_MEMATTR_SHIFT)
>  #define        pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
>  #define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
>
>  void pmap_bootstrap(void);
>  void *pmap_mapdev(vm_paddr_t, vm_size_t);
> +void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
>  void pmap_unmapdev(vm_offset_t, vm_size_t);
>  vm_offset_t pmap_steal_memory(vm_size_t size);
>  void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
> -void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr);
> +void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t attr);
>  void pmap_kremove(vm_offset_t va);
>  void *pmap_kenter_temporary(vm_paddr_t pa, int i);
>  void pmap_kenter_temporary_free(vm_paddr_t pa);
>  void pmap_flush_pvcache(vm_page_t m);
>  int pmap_emulate_modified(pmap_t pmap, vm_offset_t va);
>  void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
> +int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
>
>  #endif                         /* _KERNEL */
>
>
> Modified: head/sys/mips/include/pte.h
> ===================================================================
> --- head/sys/mips/include/pte.h Tue Dec 27 23:56:46 2016        (r310649)
> +++ head/sys/mips/include/pte.h Wed Dec 28 02:55:26 2016        (r310650)
> @@ -132,8 +132,10 @@ typedef    pt_entry_t *pd_entry_t;
>   *             it is matched.
>   */
>  #define        PTE_C(attr)             ((attr & 0x07) << 3)
> +#define        PTE_C_MASK              (PTE_C(0x07))
>  #define        PTE_C_UNCACHED          (PTE_C(MIPS_CCA_UNCACHED))
>  #define        PTE_C_CACHE             (PTE_C(MIPS_CCA_CACHED))
> +#define        PTE_C_WC                (PTE_C(MIPS_CCA_WC))
>  #define        PTE_D                   0x04
>  #define        PTE_V                   0x02
>  #define        PTE_G                   0x01
> @@ -158,6 +160,7 @@ typedef     pt_entry_t *pd_entry_t;
>  #define        pte_clear(pte, bit)     (*(pte) &= ~(bit))
>  #define        pte_set(pte, bit)       (*(pte) |= (bit))
>  #define        pte_test(pte, bit)      ((*(pte) & (bit)) == (bit))
> +#define        pte_cache_bits(pte)     ((*(pte) >> 3) & 0x07)
>
>  /* Assembly support for PTE access*/
>  #ifdef LOCORE
>
> Modified: head/sys/mips/include/vm.h
> ===================================================================
> --- head/sys/mips/include/vm.h  Tue Dec 27 23:56:46 2016        (r310649)
> +++ head/sys/mips/include/vm.h  Wed Dec 28 02:55:26 2016        (r310650)
> @@ -32,7 +32,11 @@
>  #include <machine/pte.h>
>
>  /* Memory attributes. */
> -#define        VM_MEMATTR_UNCACHEABLE  ((vm_memattr_t)PTE_C_UNCACHED)
> -#define        VM_MEMATTR_DEFAULT      ((vm_memattr_t)PTE_C_CACHE)
> +#define        VM_MEMATTR_UNCACHEABLE          ((vm_memattr_t)MIPS_CCA_UNCACHED)
> +#define        VM_MEMATTR_WRITE_BACK           ((vm_memattr_t)MIPS_CCA_CACHED)
> +#define        VM_MEMATTR_DEFAULT              VM_MEMATTR_WRITE_BACK
> +#ifdef MIPS_CCA_WC
> +#define        VM_MEMATTR_WRITE_COMBINING      ((vm_memattr_t)MIPS_CCA_WC)
> +#endif
> +#endif
>
>  #endif /* !_MACHINE_VM_H_ */
>
> Modified: head/sys/mips/mips/pmap.c
> ===================================================================
> --- head/sys/mips/mips/pmap.c   Tue Dec 27 23:56:46 2016        (r310649)
> +++ head/sys/mips/mips/pmap.c   Wed Dec 28 02:55:26 2016        (r310650)
> @@ -189,10 +189,10 @@ static void pmap_update_page_action(void
>   * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
>   * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
>   *
> - * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
> + * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
>   * access a highmem physical address on a CPU, we map the physical address to
> - * the reserved virtual address for the CPU in the kernel pagetable.  This is
> - * done with interrupts disabled(although a spinlock and sched_pin would be
> + * the reserved virtual address for the CPU in the kernel pagetable.  This is
> + * done with interrupts disabled(although a spinlock and sched_pin would be
>   * sufficient).
>   */
>  struct local_sysmaps {
> @@ -303,7 +303,7 @@ pmap_lmem_map2(vm_paddr_t phys1, vm_padd
>         return (0);
>  }
>
> -static __inline vm_offset_t
> +static __inline vm_offset_t
>  pmap_lmem_unmap(void)
>  {
>
> @@ -312,12 +312,18 @@ pmap_lmem_unmap(void)
>  #endif /* !__mips_n64 */
>
>  static __inline int
> -is_cacheable_page(vm_paddr_t pa, vm_page_t m)
> +pmap_pte_cache_bits(vm_paddr_t pa, vm_page_t m)
>  {
> +       vm_memattr_t ma;
>
> -       return ((m->md.pv_flags & PV_MEMATTR_UNCACHEABLE) == 0 &&
> -           is_cacheable_mem(pa));
> -
> +       ma = pmap_page_get_memattr(m);
> +       if (ma == VM_MEMATTR_WRITE_BACK && !is_cacheable_mem(pa))
> +               ma = VM_MEMATTR_UNCACHEABLE;
> +       return PTE_C(ma);
> +}
> +#define PMAP_PTE_SET_CACHE_BITS(pte, ps, m) {  \
> +       pte &= ~PTE_C_MASK;                     \
> +       pte |= pmap_pte_cache_bits(pa, m);      \
>  }
>
>  /*
> @@ -359,7 +365,7 @@ pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_of
>         return (pdpe);
>  }
>
> -static __inline
> +static __inline
>  pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
>  {
>
> @@ -423,7 +429,7 @@ pmap_steal_memory(vm_size_t size)
>   * Bootstrap the system enough to run with virtual memory.  This
>   * assumes that the phys_avail array has been initialized.
>   */
> -static void
> +static void
>  pmap_create_kernel_pagetable(void)
>  {
>         int i, j;
> @@ -486,7 +492,7 @@ void
>  pmap_bootstrap(void)
>  {
>         int i;
> -       int need_local_mappings = 0;
> +       int need_local_mappings = 0;
>
>         /* Sort. */
>  again:
> @@ -600,7 +606,7 @@ pmap_page_init(vm_page_t m)
>  {
>
>         TAILQ_INIT(&m->md.pv_list);
> -       m->md.pv_flags = 0;
> +       m->md.pv_flags = VM_MEMATTR_DEFAULT << PV_MEMATTR_SHIFT;
>  }
>
>  /*
> @@ -635,8 +641,8 @@ pmap_call_on_active_cpus(pmap_t pmap, vo
>                         pmap->pm_asid[cpu].gen = 0;
>         }
>         cpuid = PCPU_GET(cpuid);
> -       /*
> -        * XXX: barrier/locking for active?
> +       /*
> +        * XXX: barrier/locking for active?
>          *
>          * Take a snapshot of active here, any further changes are ignored.
>          * tlb update/invalidate should be harmless on inactive CPUs
> @@ -819,7 +825,7 @@ retry:
>   * add a wired page to the kva
>   */
>  void
> -pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
> +pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
>  {
>         pt_entry_t *pte;
>         pt_entry_t opte, npte;
> @@ -830,7 +836,7 @@ pmap_kenter_attr(vm_offset_t va, vm_padd
>
>         pte = pmap_pte(kernel_pmap, va);
>         opte = *pte;
> -       npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G;
> +       npte = TLBLO_PA_TO_PFN(pa) | PTE_C(ma) | PTE_D | PTE_V | PTE_G;
>         *pte = npte;
>         if (pte_test(&opte, PTE_V) && opte != npte)
>                 pmap_update_page(kernel_pmap, va, npte);
> @@ -843,7 +849,7 @@ pmap_kenter(vm_offset_t va, vm_paddr_t p
>         KASSERT(is_cacheable_mem(pa),
>                 ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));
>
> -       pmap_kenter_attr(va, pa, PTE_C_CACHE);
> +       pmap_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
>  }
>
>  /*
> @@ -1144,11 +1150,11 @@ _pmap_allocpte(pmap_t pmap, unsigned pte
>                 int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
>                 int pdeindex = ptepindex & (NPDEPG - 1);
>                 vm_page_t pg;
> -
> +
>                 pdep = &pmap->pm_segtab[segindex];
> -               if (*pdep == NULL) {
> +               if (*pdep == NULL) {
>                         /* recurse for allocating page dir */
> -                       if (_pmap_allocpte(pmap, NUPDE + segindex,
> +                       if (_pmap_allocpte(pmap, NUPDE + segindex,
>                             flags) == NULL) {
>                                 /* alloc failed, release current */
>                                 --m->wire_count;
> @@ -1680,7 +1686,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm
>   * pmap_remove_pte: do the things to unmap a page in a process
>   */
>  static int
> -pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
> +pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
>      pd_entry_t pde)
>  {
>         pt_entry_t oldpte;
> @@ -1864,7 +1870,7 @@ pmap_remove_all(vm_page_t m)
>                 PMAP_LOCK(pmap);
>
>                 /*
> -                * If it's last mapping writeback all caches from
> +                * If it's last mapping writeback all caches from
>                  * the page being destroyed
>                  */
>                 if (TAILQ_NEXT(pv, pv_list) == NULL)
> @@ -2030,10 +2036,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
>                 newpte |= PTE_W;
>         if (is_kernel_pmap(pmap))
>                 newpte |= PTE_G;
> -       if (is_cacheable_page(pa, m))
> -               newpte |= PTE_C_CACHE;
> -       else
> -               newpte |= PTE_C_UNCACHED;
> +       PMAP_PTE_SET_CACHE_BITS(newpte, pa, m);
>
>         mpte = NULL;
>
> @@ -2218,7 +2221,7 @@ static vm_page_t
>  pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
>      vm_prot_t prot, vm_page_t mpte)
>  {
> -       pt_entry_t *pte;
> +       pt_entry_t *pte, npte;
>         vm_paddr_t pa;
>
>         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
> @@ -2297,18 +2300,16 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
>         /*
>          * Now validate mapping with RO protection
>          */
> -       *pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
> +       npte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
>         if ((m->oflags & VPO_UNMANAGED) == 0)
> -               *pte |= PTE_MANAGED;
> +               npte |= PTE_MANAGED;
>
> -       if (is_cacheable_page(pa, m))
> -               *pte |= PTE_C_CACHE;
> -       else
> -               *pte |= PTE_C_UNCACHED;
> +       PMAP_PTE_SET_CACHE_BITS(npte, pa, m);
>
>         if (is_kernel_pmap(pmap))
> -               *pte |= PTE_G;
> +               *pte = npte | PTE_G;
>         else {
> +               *pte = npte;
>                 /*
>                  * Sync I & D caches.  Do this only if the target pmap
>                  * belongs to the current process.  Otherwise, an
> @@ -2649,12 +2650,12 @@ pmap_quick_enter_page(vm_page_t m)
>  #else
>         vm_paddr_t pa;
>         struct local_sysmaps *sysm;
> -       pt_entry_t *pte;
> +       pt_entry_t *pte, npte;
>
>         pa = VM_PAGE_TO_PHYS(m);
>
>         if (MIPS_DIRECT_MAPPABLE(pa)) {
> -               if (m->md.pv_flags & PV_MEMATTR_UNCACHEABLE)
> +               if (pmap_page_get_memattr(m) != VM_MEMATTR_WRITE_BACK)
>                         return (MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
>                 else
>                         return (MIPS_PHYS_TO_DIRECT(pa));
> @@ -2665,8 +2666,9 @@ pmap_quick_enter_page(vm_page_t m)
>         KASSERT(sysm->valid1 == 0, ("pmap_quick_enter_page: PTE busy"));
>
>         pte = pmap_pte(kernel_pmap, sysm->base);
> -       *pte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G |
> -           (is_cacheable_page(pa, m) ? PTE_C_CACHE : PTE_C_UNCACHED);
> +       npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G;
> +       PMAP_PTE_SET_CACHE_BITS(npte, pa, m);
> +       *pte = npte;
>         sysm->valid1 = 1;
>
>         return (sysm->base);
> @@ -3146,26 +3148,26 @@ pmap_is_referenced(vm_page_t m)
>   * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
>   */
>  void *
> -pmap_mapdev(vm_paddr_t pa, vm_size_t size)
> +pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
>  {
>          vm_offset_t va, tmpva, offset;
>
> -       /*
> -        * KSEG1 maps only first 512M of phys address space. For
> +       /*
> +        * KSEG1 maps only first 512M of phys address space. For
>          * pa > 0x20000000 we should make proper mapping * using pmap_kenter.
>          */
> -       if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
> +       if (MIPS_DIRECT_MAPPABLE(pa + size - 1) && ma == VM_MEMATTR_UNCACHEABLE)
>                 return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
>         else {
>                 offset = pa & PAGE_MASK;
>                 size = roundup(size + offset, PAGE_SIZE);
> -
> +
>                 va = kva_alloc(size);
>                 if (!va)
>                         panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
>                 pa = trunc_page(pa);
>                 for (tmpva = va; size > 0;) {
> -                       pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
> +                       pmap_kenter_attr(tmpva, pa, ma);
>                         size -= PAGE_SIZE;
>                         tmpva += PAGE_SIZE;
>                         pa += PAGE_SIZE;
> @@ -3175,6 +3177,12 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t siz
>         return ((void *)(va + offset));
>  }
>
> +void *
> +pmap_mapdev(vm_paddr_t pa, vm_size_t size)
> +{
> +       return pmap_mapdev_attr(pa, size, VM_MEMATTR_UNCACHEABLE);
> +}
> +
>  void
>  pmap_unmapdev(vm_offset_t va, vm_size_t size)
>  {
> @@ -3220,7 +3228,7 @@ retry:
>                  * This may falsely report the given address as
>                  * MINCORE_REFERENCED.  Unfortunately, due to the lack of
>                  * per-PTE reference information, it is impossible to
> -                * determine if the address is MINCORE_REFERENCED.
> +                * determine if the address is MINCORE_REFERENCED.
>                  */
>                 m = PHYS_TO_VM_PAGE(pa);
>                 if ((m->aflags & PGA_REFERENCED) != 0)
> @@ -3500,7 +3508,7 @@ pmap_kextract(vm_offset_t va)
>         mapped = (va >= MIPS_KSEG2_START || va < MIPS_KSEG2_END);
>  #if defined(__mips_n64)
>         mapped = mapped || (va >= MIPS_XKSEG_START || va < MIPS_XKSEG_END);
> -#endif
> +#endif
>         /*
>          * Kernel virtual.
>          */
> @@ -3524,7 +3532,7 @@ pmap_kextract(vm_offset_t va)
>  }
>
>
> -void
> +void
>  pmap_flush_pvcache(vm_page_t m)
>  {
>         pv_entry_t pv;
> @@ -3551,12 +3559,85 @@ pmap_page_set_memattr(vm_page_t m, vm_me
>         if (TAILQ_FIRST(&m->md.pv_list) != NULL)
>                 panic("Can't change memattr on page with existing mappings");
>
> -       /*
> -        * The only memattr we support is UNCACHEABLE, translate the (semi-)MI
> -        * representation of that into our internal flag in the page MD struct.
> -        */
> -       if (ma == VM_MEMATTR_UNCACHEABLE)
> -               m->md.pv_flags |= PV_MEMATTR_UNCACHEABLE;
> -       else
> -               m->md.pv_flags &= ~PV_MEMATTR_UNCACHEABLE;
> +       /* Clean memattr portion of pv_flags */
> +       m->md.pv_flags &= ~PV_MEMATTR_MASK;
> +       m->md.pv_flags |= (ma << PV_MEMATTR_SHIFT) & PV_MEMATTR_MASK;
> +}
> +
> +static __inline void
> +pmap_pte_attr(pt_entry_t *pte, vm_memattr_t ma)
> +{
> +       u_int npte;
> +
> +       npte = *(u_int *)pte;
> +       npte &= ~PTE_C_MASK;
> +       npte |= PTE_C(ma);
> +       *pte = npte;
> +}
> +
> +int
> +pmap_change_attr(vm_offset_t sva, vm_size_t size, vm_memattr_t ma)
> +{
> +       pd_entry_t *pde, *pdpe;
> +       pt_entry_t *pte;
> +       vm_offset_t ova, eva, va, va_next;
> +       pmap_t pmap;
> +
> +       ova = sva;
> +       eva = sva + size;
> +       if (eva < sva)
> +               return (EINVAL);
> +
> +       pmap = kernel_pmap;
> +       PMAP_LOCK(pmap);
> +
> +       for (; sva < eva; sva = va_next) {
> +               pdpe = pmap_segmap(pmap, sva);
> +#ifdef __mips_n64
> +               if (*pdpe == 0) {
> +                       va_next = (sva + NBSEG) & ~SEGMASK;
> +                       if (va_next < sva)
> +                               va_next = eva;
> +                       continue;
> +               }
> +#endif
> +               va_next = (sva + NBPDR) & ~PDRMASK;
> +               if (va_next < sva)
> +                       va_next = eva;
> +
> +               pde = pmap_pdpe_to_pde(pdpe, sva);
> +               if (*pde == NULL)
> +                       continue;
> +
> +               /*
> +                * Limit our scan to either the end of the va represented
> +                * by the current page table page, or to the end of the
> +                * range being removed.
> +                */
> +               if (va_next > eva)
> +                       va_next = eva;
> +
> +               va = va_next;
> +               for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
> +                   sva += PAGE_SIZE) {
> +                       if (!pte_test(pte, PTE_V) || pte_cache_bits(pte) == ma) {
> +                               if (va != va_next) {
> +                                       pmap_invalidate_range(pmap, va, sva);
> +                                       va = va_next;
> +                               }
> +                               continue;
> +                       }
> +                       if (va == va_next)
> +                               va = sva;
> +
> +                       pmap_pte_attr(pte, ma);
> +               }
> +               if (va != va_next)
> +                       pmap_invalidate_range(pmap, va, sva);
> +       }
> +       PMAP_UNLOCK(pmap);
> +
> +       /* Flush caches to be in the safe side */
> +       mips_dcache_wbinv_range(ova, size);
> +       return 0;
>  }
>


