Date:      Thu, 21 Mar 2002 20:43:40 -0800 (PST)
From:      Thomas Moestl <tmm@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 8195 for review
Message-ID:  <200203220443.g2M4he116543@freefall.freebsd.org>

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=8195

Change 8195 by tmm@tmm_sparc64 on 2002/03/21 20:42:53

	Integ with sparc64-tmm:
	- allow overcommitting DVMA memory
	- clean up busdma code
	- bug fixes for error paths
	- cleanups
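
For reference, a driver's view of the busdma interface is unchanged by this
change; the extra "device tag" argument is only added behind the
bus_dmamap_*() and bus_dmamem_*() macros.  A minimal, purely hypothetical
consumer (foo_softc, FOO_BUFSZ and the sc_* fields are made up for this
sketch) would still look like this:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <machine/bus.h>

	#define	FOO_BUFSZ	4096

	struct foo_softc {
		bus_dma_tag_t	sc_dmat;	/* leaf tag; all methods NULL */
		bus_dmamap_t	sc_dmamap;
		void		*sc_buf;
		bus_addr_t	sc_dmaaddr;	/* DVMA address of sc_buf */
	};

	static void
	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		struct foo_softc *sc = arg;

		if (error != 0)
			return;
		sc->sc_dmaaddr = segs[0].ds_addr;
	}

	static int
	foo_dma_setup(struct foo_softc *sc)
	{
		int error;

		/* NULL parent: sparc64_root_dma_tag is used (see bus_machdep.c). */
		error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR, NULL, NULL, FOO_BUFSZ, 1, FOO_BUFSZ, 0,
		    &sc->sc_dmat);
		if (error != 0)
			return (error);
		/* Allocates DMA-able memory and a map in one go. */
		error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_buf, BUS_DMA_NOWAIT,
		    &sc->sc_dmamap);
		if (error != 0)
			return (error);
		/* Dispatches to the bus (psycho/sbus) or nexus method. */
		return (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_buf,
		    FOO_BUFSZ, foo_dma_callback, sc, 0));
	}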

Affected files ...

... //depot/projects/sparc64/sys/sparc64/include/bus.h#11 integrate
... //depot/projects/sparc64/sys/sparc64/include/bus_private.h#1 branch
... //depot/projects/sparc64/sys/sparc64/include/iommuvar.h#12 integrate
... //depot/projects/sparc64/sys/sparc64/pci/psycho.c#25 integrate
... //depot/projects/sparc64/sys/sparc64/sbus/sbus.c#5 integrate
... //depot/projects/sparc64/sys/sparc64/sparc64/bus_machdep.c#14 integrate
... //depot/projects/sparc64/sys/sparc64/sparc64/cache.c#15 integrate
... //depot/projects/sparc64/sys/sparc64/sparc64/iommu.c#18 integrate
... //depot/projects/sparc64/sys/sparc64/sparc64/pv.c#27 integrate

Differences ...

==== //depot/projects/sparc64/sys/sparc64/include/bus.h#11 (text+ko) ====

@@ -827,19 +827,22 @@
 	/*
 	 * DMA mapping methods.
 	 */
-	int	(*dmamap_create)(bus_dma_tag_t, int, bus_dmamap_t *);
-	int	(*dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
-	int	(*dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
-	    bus_size_t, bus_dmamap_callback_t *, void *, int);
-	void	(*dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
-	void	(*dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
+	int	(*dmamap_create)(bus_dma_tag_t, bus_dma_tag_t, int,
+	    bus_dmamap_t *);
+	int	(*dmamap_destroy)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+	int	(*dmamap_load)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+	    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
+	void	(*dmamap_unload)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+	void	(*dmamap_sync)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
 	    bus_dmasync_op_t);
 
 	/*
 	 * DMA memory utility functions.
 	 */
-	int	(*dmamem_alloc)(bus_dma_tag_t, void **, int, bus_dmamap_t *);
-	void	(*dmamem_free)(bus_dma_tag_t, void *, bus_dmamap_t);
+	int	(*dmamem_alloc)(bus_dma_tag_t, bus_dma_tag_t, void **, int,
+	    bus_dmamap_t *);
+	void	(*dmamem_free)(bus_dma_tag_t, bus_dma_tag_t, void *,
+	    bus_dmamap_t);
 };
 
 /*
@@ -859,29 +862,93 @@
 int sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp);
 void sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map);
 
+static __inline int
+sparc64_dmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, int f,
+    bus_dmamap_t *p)
+{
+	bus_dma_tag_t lt;
+
+	for (lt = pt; lt->dmamap_create == NULL; lt = lt->parent)
+		;
+	return ((*lt->dmamap_create)(lt, dt, f, p));
+}
 #define	bus_dmamap_create(t, f, p)					\
-	(*(t)->dmamap_create)((t), (f), (p))
+	sparc64_dmamap_create((t), (t), (f), (p))
+
+static __inline int
+sparc64_dmamap_destroy(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
+{
+	bus_dma_tag_t lt;
+
+	for (lt = pt; lt->dmamap_destroy == NULL; lt = lt->parent)
+		;
+	return ((*lt->dmamap_destroy)(lt, dt, p));
+}
 #define	bus_dmamap_destroy(t, p)					\
-	(*(t)->dmamap_destroy)((t), (p))
+	sparc64_dmamap_destroy((t), (t), (p))
+
+static __inline int
+sparc64_dmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
+    void *p, bus_size_t s, bus_dmamap_callback_t *cb, void *cba, int f)
+{
+	bus_dma_tag_t lt;
+
+	for (lt = pt; lt->dmamap_load == NULL; lt = lt->parent)
+		;
+	return ((*lt->dmamap_load)(lt, dt, m, p, s, cb, cba, f));
+}
 #define	bus_dmamap_load(t, m, p, s, cb, cba, f)				\
-	(*(t)->dmamap_load)((t), (m), (p), (s), (cb), (cba), (f))
+	sparc64_dmamap_load((t), (t), (m), (p), (s), (cb), (cba), (f))
+
+static __inline void
+sparc64_dmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
+{
+	bus_dma_tag_t lt;
+
+	for (lt = pt; lt->dmamap_unload == NULL; lt = lt->parent)
+		;
+	(*lt->dmamap_unload)(lt, dt, p);
+}
 #define	bus_dmamap_unload(t, p)						\
-	(*(t)->dmamap_unload)((t), (p))
+	sparc64_dmamap_unload((t), (t), (p))
+
+static __inline void
+sparc64_dmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
+    bus_dmasync_op_t op)
+{
+	bus_dma_tag_t lt;
+
+	for (lt = pt; lt->dmamap_sync == NULL; lt = lt->parent)
+		;
+	(*lt->dmamap_sync)(lt, dt, m, op);
+}
 #define	bus_dmamap_sync(t, m, op)					\
-	(void)((t)->dmamap_sync ?					\
-	    (*(t)->dmamap_sync)((t), (m), (op)) : (void)0)
+	sparc64_dmamap_sync((t), (t), (m), (op))
+
+static __inline int
+sparc64_dmamem_alloc(bus_dma_tag_t pt, bus_dma_tag_t dt, void **v, int f,
+    bus_dmamap_t *m)
+{
+	bus_dma_tag_t lt;
 
+	for (lt = pt; lt->dmamem_alloc == NULL; lt = lt->parent)
+		;
+	return ((*lt->dmamem_alloc)(lt, dt, v, f, m));
+}
 #define	bus_dmamem_alloc(t, v, f, m)					\
-	(*(t)->dmamem_alloc)((t), (v), (f), (m))
+	sparc64_dmamem_alloc((t), (t), (v), (f), (m))
+
+static __inline void
+sparc64_dmamem_free(bus_dma_tag_t pt, bus_dma_tag_t dt, void *v,
+    bus_dmamap_t m)
+{
+	bus_dma_tag_t lt;
+
+	for (lt = pt; lt->dmamem_free == NULL; lt = lt->parent)
+		;
+	(*lt->dmamem_free)(lt, dt, v, m);
+}
 #define	bus_dmamem_free(t, v, m)					\
-	(*(t)->dmamem_free)((t), (v), (m))
-
-struct bus_dmamap {
-	bus_dma_tag_t	dmat;
-	void		*buf;		/* unmapped buffer pointer */
-	bus_size_t	buflen;		/* unmapped buffer length */
-	bus_addr_t	start;		/* start of mapped region */
-	struct resource *res;		/* associated resource */
-};
+	sparc64_dmamem_free((t), (t), (v), (m))
 
 #endif /* !_MACHINE_BUS_H_ */
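
The new inline wrappers dispatch by walking up the parent chain until a tag
that actually implements the method is found.  As a sketch (the hierarchy and
names below are illustrative, not taken from the diff), a load on a
driver-created tag resolves like this:

	root/nexus tag             dmamap_load = nexus_dmamap_load
	      ^
	psycho bus tag             dmamap_load = psycho_dmamap_load
	      ^
	driver tag                 dmamap_load = NULL (set by bus_dma_tag_create)

	bus_dmamap_load(driver_tag, map, buf, len, cb, cba, flags)
	  -> sparc64_dmamap_load(driver_tag, driver_tag, ...)
	  -> walks parents to the psycho tag, the first non-NULL dmamap_load
	  -> psycho_dmamap_load(psycho_tag, driver_tag, ...)

The providing tag (pdmat) supplies the bus cookie (softc), while the original
device tag (ddmat) still carries the constraints (maxsize, nsegments, ...)
that the nexus and iommu code consult.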

==== //depot/projects/sparc64/sys/sparc64/include/iommuvar.h#12 (text+ko) ====

@@ -82,19 +82,19 @@
 void iommu_remove(struct iommu_state *, vm_offset_t, size_t);
 void iommu_decode_fault(struct iommu_state *, vm_offset_t);
 
-int iommu_dvmamap_create(bus_dma_tag_t, struct iommu_state *, int,
-    bus_dmamap_t *);
-int iommu_dvmamap_destroy(bus_dma_tag_t, struct iommu_state *,
+int iommu_dvmamap_create(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
+    int, bus_dmamap_t *);
+int iommu_dvmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
     bus_dmamap_t);
-int iommu_dvmamap_load(bus_dma_tag_t, struct iommu_state *, bus_dmamap_t,
-    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
-void iommu_dvmamap_unload(bus_dma_tag_t, struct iommu_state *,
+int iommu_dvmamap_load(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
+    bus_dmamap_t, void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
+void iommu_dvmamap_unload(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
     bus_dmamap_t);
-void iommu_dvmamap_sync(bus_dma_tag_t, struct iommu_state *, bus_dmamap_t,
-    bus_dmasync_op_t);
-int iommu_dvmamem_alloc(bus_dma_tag_t, struct iommu_state *, void **, int,
-    bus_dmamap_t *);
-void iommu_dvmamem_free(bus_dma_tag_t, struct iommu_state *, void *,
-    bus_dmamap_t);
+void iommu_dvmamap_sync(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
+    bus_dmamap_t, bus_dmasync_op_t);
+int iommu_dvmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
+    void **, int, bus_dmamap_t *);
+void iommu_dvmamem_free(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
+    void *, bus_dmamap_t);
 
 #endif /* !_MACHINE_IOMMUVAR_H_ */

==== //depot/projects/sparc64/sys/sparc64/pci/psycho.c#25 (text+ko) ====

@@ -99,14 +99,18 @@
  * bus space and bus dma support for UltraSPARC `psycho'.  note that most
  * of the bus dma support is provided by the iommu dvma controller.
  */
-static int psycho_dmamap_create(bus_dma_tag_t, int, bus_dmamap_t *);
-static int psycho_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
-static int psycho_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
-    bus_dmamap_callback_t *, void *, int);
-static void psycho_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-static void psycho_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
-static int psycho_dmamem_alloc(bus_dma_tag_t, void **, int, bus_dmamap_t *);
-static void psycho_dmamem_free(bus_dma_tag_t, void *, bus_dmamap_t);
+static int psycho_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
+    bus_dmamap_t *);
+static int psycho_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+static int psycho_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
+static void psycho_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+static void psycho_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+    bus_dmasync_op_t);
+static int psycho_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
+    bus_dmamap_t *);
+static void psycho_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
+    bus_dmamap_t);
 
 /*
  * autoconfiguration
@@ -1195,7 +1199,7 @@
 	if (type == SYS_RES_IRQ)
 		return (bus_deactivate_resource(bus, type, rid, r));
 	if (type == SYS_RES_MEMORY) {
-		sparc64_bus_mem_unmap(rman_get_bustag(r), rman_get_size(r));
+		sparc64_bus_mem_unmap(rman_get_virtual(r), rman_get_size(r));
 		rman_set_virtual(r, NULL);
 	}
 	return (rman_deactivate_resource(r));
@@ -1274,69 +1278,74 @@
  * hooks into the iommu dvma calls.
  */
 static int
-psycho_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, bus_dmamap_t *mapp)
+psycho_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
+    int flags, bus_dmamap_t *mapp)
 {
 	struct psycho_softc *sc;
 
-	sc = (struct psycho_softc *)dmat->cookie;
-	return (iommu_dvmamem_alloc(dmat, sc->sc_is, vaddr, flags, mapp));
+	sc = (struct psycho_softc *)pdmat->cookie;
+	return (iommu_dvmamem_alloc(pdmat, ddmat, sc->sc_is, vaddr, flags,
+	    mapp));
 }
 
 static void
-psycho_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+psycho_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
+    bus_dmamap_t map)
 {
 	struct psycho_softc *sc;
 
-	sc = (struct psycho_softc *)dmat->cookie;
-	iommu_dvmamem_free(dmat, sc->sc_is, vaddr, map);
+	sc = (struct psycho_softc *)pdmat->cookie;
+	iommu_dvmamem_free(pdmat, ddmat, sc->sc_is, vaddr, map);
 }
 
 static int
-psycho_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+psycho_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
+    bus_dmamap_t *mapp)
 {
 	struct psycho_softc *sc;
 
-	sc = (struct psycho_softc *)dmat->cookie;
-	return (iommu_dvmamap_create(dmat, sc->sc_is, flags, mapp));
+	sc = (struct psycho_softc *)pdmat->cookie;
+	return (iommu_dvmamap_create(pdmat, ddmat, sc->sc_is, flags, mapp));
 
 }
 
 static int
-psycho_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+psycho_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
+    bus_dmamap_t map)
 {
 	struct psycho_softc *sc;
 
-	sc = (struct psycho_softc *)dmat->cookie;
-	return (iommu_dvmamap_destroy(dmat, sc->sc_is, map));
+	sc = (struct psycho_softc *)pdmat->cookie;
+	return (iommu_dvmamap_destroy(pdmat, ddmat, sc->sc_is, map));
 }
 
 static int
-psycho_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
-    int flags)
+psycho_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
+    void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
+    void *callback_arg, int flags)
 {
 	struct psycho_softc *sc;
 
-	sc = (struct psycho_softc *)dmat->cookie;
-	return (iommu_dvmamap_load(dmat, sc->sc_is, map, buf, buflen, callback,
-	    callback_arg, flags));
+	sc = (struct psycho_softc *)pdmat->cookie;
+	return (iommu_dvmamap_load(pdmat, ddmat, sc->sc_is, map, buf, buflen,
+	    callback, callback_arg, flags));
 }
 
 static void
-psycho_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+psycho_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 {
 	struct psycho_softc *sc;
 
-	sc = (struct psycho_softc *)dmat->cookie;
-	iommu_dvmamap_unload(dmat, sc->sc_is, map);
+	sc = (struct psycho_softc *)pdmat->cookie;
+	iommu_dvmamap_unload(pdmat, ddmat, sc->sc_is, map);
 }
 
 static void
-psycho_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+psycho_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
     bus_dmasync_op_t op)
 {
 	struct psycho_softc *sc;
 
-	sc = (struct psycho_softc *)dmat->cookie;
-	iommu_dvmamap_sync(dmat, sc->sc_is, map, op);
+	sc = (struct psycho_softc *)pdmat->cookie;
+	iommu_dvmamap_sync(pdmat, ddmat, sc->sc_is, map, op);
 }

==== //depot/projects/sparc64/sys/sparc64/sbus/sbus.c#5 (text+ko) ====

@@ -231,14 +231,18 @@
 /*
  * DVMA routines
  */
-static int sbus_dmamap_create(bus_dma_tag_t, int, bus_dmamap_t *);
-static int sbus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
-static int sbus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
-    bus_dmamap_callback_t *, void *, int);
-static void sbus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-static void sbus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
-static int sbus_dmamem_alloc(bus_dma_tag_t, void **, int, bus_dmamap_t *);
-static void sbus_dmamem_free(bus_dma_tag_t, void *, bus_dmamap_t);
+static int sbus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
+    bus_dmamap_t *);
+static int sbus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+static int sbus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
+    bus_size_t, bus_dmamap_callback_t *, void *, int);
+static void sbus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+static void sbus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+    bus_dmasync_op_t);
+static int sbus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
+    bus_dmamap_t *);
+static void sbus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
+    bus_dmamap_t);
 
 static device_method_t sbus_methods[] = {
 	/* Device interface */
@@ -909,62 +913,66 @@
 }
 
 static int
-sbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+sbus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
+    bus_dmamap_t *mapp)
 {
-	struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
+	struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
 
-	return (iommu_dvmamap_create(dmat, &sc->sc_is, flags, mapp));
+	return (iommu_dvmamap_create(pdmat, ddmat, &sc->sc_is, flags, mapp));
 
 }
 
 static int
-sbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+sbus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 {
-	struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
+	struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
 
-	return (iommu_dvmamap_destroy(dmat, &sc->sc_is, map));
+	return (iommu_dvmamap_destroy(pdmat, ddmat, &sc->sc_is, map));
 }
 
 static int
-sbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
-    int flags)
+sbus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
+    void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
+    void *callback_arg, int flags)
 {
-	struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
+	struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
 
-	return (iommu_dvmamap_load(dmat, &sc->sc_is, map, buf, buflen, callback,
-	    callback_arg, flags));
+	return (iommu_dvmamap_load(pdmat, ddmat, &sc->sc_is, map, buf, buflen,
+	    callback, callback_arg, flags));
 }
 
 static void
-sbus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+sbus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 {
-	struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
+	struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
 
-	iommu_dvmamap_unload(dmat, &sc->sc_is, map);
+	iommu_dvmamap_unload(pdmat, ddmat, &sc->sc_is, map);
 }
 
 static void
-sbus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+sbus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
     bus_dmasync_op_t op)
 {
-	struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
+	struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
 
-	iommu_dvmamap_sync(dmat, &sc->sc_is, map, op);
+	iommu_dvmamap_sync(pdmat, ddmat, &sc->sc_is, map, op);
 }
 
 static int
-sbus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, bus_dmamap_t *mapp)
+sbus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
+    int flags, bus_dmamap_t *mapp)
 {
-	struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
+	struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
 
-	return (iommu_dvmamem_alloc(dmat, &sc->sc_is, vaddr, flags, mapp));
+	return (iommu_dvmamem_alloc(pdmat, ddmat, &sc->sc_is, vaddr, flags,
+		    mapp));
 }
 
 static void
-sbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+sbus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
+    bus_dmamap_t map)
 {
-	struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
+	struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
 
-	iommu_dvmamem_free(dmat, &sc->sc_is, vaddr, map);
+	iommu_dvmamem_free(pdmat, ddmat, &sc->sc_is, vaddr, map);
 }

==== //depot/projects/sparc64/sys/sparc64/sparc64/bus_machdep.c#14 (text+ko) ====

@@ -124,6 +124,7 @@
 
 #include <machine/asi.h>
 #include <machine/bus.h>
+#include <machine/bus_private.h>
 #include <machine/cache.h>
 #include <machine/pmap.h>
 #include <machine/smp.h>
@@ -153,16 +154,25 @@
  * Note: there is no support for bounce buffers yet.
  */
 
-static int nexus_dmamap_create(bus_dma_tag_t, int, bus_dmamap_t *);
-static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
-static int nexus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
-    bus_dmamap_callback_t *, void *, int);
-static void nexus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-static void nexus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
-static int nexus_dmamem_alloc(bus_dma_tag_t, void **, int, bus_dmamap_t *);
-static void nexus_dmamem_free(bus_dma_tag_t, void *, bus_dmamap_t);
+static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
+    bus_dmamap_t *);
+static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
+static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
+static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
+    bus_dmasync_op_t);
+static int nexus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
+    bus_dmamap_t *);
+static void nexus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
+    bus_dmamap_t);
 
-
+/*
+ * Since there is no way for a device to obtain a dma tag from its parent,
+ * we use this kluge to handle the different supported bus systems.
+ * The sparc64_root_dma_tag is used as parent for tags that have none, so that
+ * the correct methods will be used.
+ */
 bus_dma_tag_t sparc64_root_dma_tag;
 
 /*
@@ -175,7 +185,7 @@
     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
 {
 
-	bus_dma_tag_t newtag, eparent;
+	bus_dma_tag_t newtag;
 
 	/* Return a NULL tag on failure */
 	*dmat = NULL;
@@ -184,11 +194,7 @@
 	if (newtag == NULL)
 		return (ENOMEM);
 
-	/* Ugh... */
-	eparent = parent != NULL ? parent : sparc64_root_dma_tag;
-	memcpy(newtag, eparent, sizeof(*newtag));
-	if (parent != NULL)
-		newtag->parent = parent;
+	newtag->parent = parent != NULL ? parent : sparc64_root_dma_tag;
 	newtag->alignment = alignment;
 	newtag->boundary = boundary;
 	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
@@ -199,9 +205,17 @@
 	newtag->nsegments = nsegments;
 	newtag->maxsegsz = maxsegsz;
 	newtag->flags = flags;
-	newtag->ref_count = 1; /* Count ourself */
+	newtag->ref_count = 1; /* Count ourselves */
 	newtag->map_count = 0;
-	
+
+	newtag->dmamap_create = NULL;
+	newtag->dmamap_destroy = NULL;
+	newtag->dmamap_load = NULL;
+	newtag->dmamap_unload = NULL;
+	newtag->dmamap_sync = NULL;
+	newtag->dmamem_alloc = NULL;
+	newtag->dmamem_free = NULL;
+
 	/* Take into account any restrictions imposed by our parent tag */
 	if (parent != NULL) {
 		newtag->lowaddr = ulmin(parent->lowaddr, newtag->lowaddr);
@@ -211,10 +225,9 @@
 		 *     all the way up the inheritence chain.
 		 */
 		newtag->boundary = ulmax(parent->boundary, newtag->boundary);
-		if (parent != NULL)
-			parent->ref_count++;
 	}
-	
+	newtag->parent->ref_count++;
+
 	*dmat = newtag;
 	return (0);
 }
@@ -222,16 +235,15 @@
 int
 bus_dma_tag_destroy(bus_dma_tag_t dmat)
 {
+	bus_dma_tag_t parent;
 
 	if (dmat != NULL) {
 		if (dmat->map_count != 0)
 			return (EBUSY);
-
 		while (dmat != NULL) {
-			bus_dma_tag_t parent;
-
 			parent = dmat->parent;
 			dmat->ref_count--;
+			printf("tag_destroy\n");
 			if (dmat->ref_count == 0) {
 				free(dmat, M_DEVBUF);
 				/*
@@ -252,12 +264,13 @@
  * DMA map creation functions.
  */
 static int
-nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+nexus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
+    bus_dmamap_t *mapp)
 {
 
 	/* Not much to do...? */
 	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_WAITOK | M_ZERO);
-	dmat->map_count++;
+	ddmat->map_count++;
 	return (0);
 }
 
@@ -266,11 +279,11 @@
  * DMA map destruction functions.
  */
 static int
-nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 {
 
 	free(map, M_DEVBUF);
-	dmat->map_count--;
+	ddmat->map_count--;
 	return (0);
 }
 
@@ -287,14 +300,14 @@
  * bypass DVMA.
  */
 static int
-nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
-    int flags)
+nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
+    void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
+    void *callback_arg, int flags)
 {
 	vm_offset_t vaddr;
 	vm_offset_t paddr;
 #ifdef __GNUC__
-	bus_dma_segment_t dm_segments[dmat->nsegments];
+	bus_dma_segment_t dm_segments[ddmat->nsegments];
 #else
 	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
 #endif
@@ -331,7 +344,7 @@
 			/* Go to the next segment */
 			sg++;
 			seg++;
-			if (seg > dmat->nsegments)
+			if (seg > ddmat->nsegments)
 				break;
 			sg->ds_addr = paddr;
 			sg->ds_len = size;
@@ -357,7 +370,7 @@
  * bus-specific DMA map unload functions.
  */
 static void
-nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 {
 
 	/* Nothing to do...? */
@@ -368,7 +381,8 @@
  * by bus-specific DMA map synchronization functions.
  */
 static void
-nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
+    bus_dmasync_op_t op)
 {
 
 	/*
@@ -412,7 +426,7 @@
 	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_WAITOK | M_ZERO);
 	if (*mapp == NULL)
 		return (ENOMEM);
-	
+
 	dmat->map_count++;
 	return (0);
 }
@@ -430,12 +444,12 @@
  * by bus-specific DMA memory allocation functions.
  */
 static int
-nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
-    bus_dmamap_t *mapp)
+nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
+    int flags, bus_dmamap_t *mapp)
 {
-	
-	if ((dmat->maxsize <= PAGE_SIZE)) {
-		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
+
+	if ((ddmat->maxsize <= PAGE_SIZE)) {
+		*vaddr = malloc(ddmat->maxsize, M_DEVBUF,
 		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
 	} else {
 		/*
@@ -443,10 +457,11 @@
 		 * and handles multi-seg allocations.  Nobody is doing multi-seg
 		 * allocations yet though.
 		 */
-		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
+		*vaddr = contigmalloc(ddmat->maxsize, M_DEVBUF,
 		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
-		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1UL,
-		    dmat->boundary);
+		    0ul, ddmat->lowaddr,
+		    ddmat->alignment ? ddmat->alignment : 1UL,
+		    ddmat->boundary);
 	}
 	if (*vaddr == NULL) {
 		free(*mapp, M_DEVBUF);
@@ -460,14 +475,15 @@
  * bus-specific DMA memory free functions.
  */
 static void
-nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+nexus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
+    bus_dmamap_t map)
 {
 
-	sparc64_dmamem_free_map(dmat, map);
-	if ((dmat->maxsize <= PAGE_SIZE))
+	sparc64_dmamem_free_map(ddmat, map);
+	if ((ddmat->maxsize <= PAGE_SIZE))
 		free(vaddr, M_DEVBUF);
 	else
-		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
+		contigfree(vaddr, ddmat->maxsize, M_DEVBUF);
 }
 
 struct bus_dma_tag nexus_dmatag = {
@@ -567,7 +583,7 @@
 	for (va = sva; va < endva; va += PAGE_SIZE)
 		pmap_kremove(va);
 	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
-	kmem_free(kernel_map, va, size);
+	kmem_free(kernel_map, sva, size);
 	return (0);
 }
 

==== //depot/projects/sparc64/sys/sparc64/sparc64/cache.c#15 (text+ko) ====

@@ -395,7 +395,6 @@
 ecache_inval_phys(vm_offset_t start, vm_offset_t end)
 {
 	vm_offset_t addr, eca;
-	critical_t c;
 	u_long tag, j;
 
 	if (!cache.c_enabled)
@@ -407,7 +406,6 @@
 			/* XXX: guesswork... */
 			eca = (addr & (cache.ec_size - 1)) |
 			    (j << (cache.ec_l2set));
-			c = critical_enter();
 			/*
 			 * Retrieve the tag:
 			 * A read from the appropriate VA in ASI_ECACHE_R

==== //depot/projects/sparc64/sys/sparc64/sparc64/iommu.c#18 (text+ko) ====

@@ -123,6 +123,7 @@
 #include <vm/pmap.h>
 
 #include <machine/bus.h>
+#include <machine/bus_private.h>
 #include <machine/iommureg.h>
 #include <machine/pmap.h>
 #include <machine/resource.h>
@@ -158,12 +159,44 @@
 	bus_space_write_8((is)->is_bustag, (is)->is_bushandle, 		\
 	    (is)->reg + (off), (v))
 
+/*
+ * Always overallocate one page; this is needed to handle alignment of the
+ * buffer, so it makes sense to use a lazy allocation scheme.
+ */
+#define	IOMMU_SIZE_ROUNDUP(sz)						\
+	(round_io_page(sz) + IO_PAGE_SIZE)
+
 static	int iommu_strbuf_flush_done(struct iommu_state *);
 #ifdef IOMMU_DIAG
 static 	void iommu_diag(struct iommu_state *, vm_offset_t va);
 #endif
 
 /*
+ * LRU queue handling for lazy resource allocation.
+ */
+static STAILQ_HEAD(, bus_dmamap) iommu_maplruq =
+   STAILQ_HEAD_INITIALIZER(iommu_maplruq);
+
+static __inline void
+iommu_map_insq(bus_dmamap_t map)
+{
+
+	if (!map->onq && map->dvmaresv != 0) {
+		STAILQ_INSERT_TAIL(&iommu_maplruq, map, maplruq);
+		map->onq = 1;
+	}
+}
+
+static __inline void
+iommu_map_remq(bus_dmamap_t map)
+{
+
+	if (map->onq)
+		STAILQ_REMOVE(&iommu_maplruq, map, bus_dmamap, maplruq);
+	map->onq = 0;
+}
+
+/*
  * initialise the UltraSPARC IOMMU (SBUS or PCI):
  *	- allocate and setup the iotsb.
  *	- enable the IOMMU
@@ -457,55 +490,50 @@
 
 /* Allocate DVMA virtual memory for a map. */
 static int
-iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
+iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
+    bus_size_t size)
 {
-	bus_size_t align, bound, sgsize, maxsize;
+	bus_size_t align, bound, sgsize;
 
 	/*
-	 * Choose a maximum length. If a boundary is specified, a map cannot
-	 * be larger than it.
-	 * XXX: we end up overallocating quite a lot here, since we only know
-	 * an upper bound for the tag, but not for the map, so we need to
-	 * allocate the maximum size to each map. Usually, plenty of DVMA
-	 * virtual memory is available (the minimum is 8MB), so this should
-	 * not be much of a poblem right now.
+	 * If a boundary is specified, a map cannot be larger than it; however
+	 * we do not clip currently, as that does not play well with the lazy
+	 * allocation code.
+	 * Alignment to a page boundary is always enforced.
 	 */
-	if (t->boundary != 0)
-		maxsize = ulmin(t->maxsize, t->boundary);
-	else
-		maxsize = t->maxsize;
-	/* Alignment to a page boundary is always enforced. */
 	align = (t->alignment + IO_PAGE_MASK) >> IO_PAGE_SHIFT;
-	sgsize = round_io_page(maxsize) >> IO_PAGE_SHIFT;
+	sgsize = round_io_page(size) >> IO_PAGE_SHIFT;
 	if (t->boundary > 0 && t->boundary < IO_PAGE_SIZE)
 		panic("iommu_dvmamap_load: illegal boundary specified");
 	bound = ulmax(t->boundary >> IO_PAGE_SHIFT, 1);
+	map->dvmaresv = 0;
 	map->res = rman_reserve_resource_bound(&is->is_dvma_rman, 0L,
 	    t->lowaddr, sgsize, bound >> IO_PAGE_SHIFT,
 	    RF_ACTIVE | rman_make_alignment_flags(align), NULL);
-	if (map->res == NULL) {
-		printf("DVMA allocation failed!\n");	/* XXX */
+	if (map->res == NULL)
 		return (ENOMEM);
-	}
 
 	map->start = rman_get_start(map->res) * IO_PAGE_SIZE;
+	map->dvmaresv = size;
+	iommu_map_insq(map);
 	return (0);
 }
 
 /* Free DVMA virtual memory for a map. */
 static void
-iommu_dvma_vfree(bus_dma_tag_t t, bus_dmamap_t map)
+iommu_dvma_vfree(bus_dmamap_t map)
 {
 
-	if (rman_release_resource(map->res) != 0) {
+	iommu_map_remq(map);
+	if (map->res != NULL && rman_release_resource(map->res) != 0)
 		printf("warning: DVMA space lost\n");
-	}
 	map->res = NULL;
+	map->dvmaresv = 0;
 }
 
 int
-iommu_dvmamem_alloc(bus_dma_tag_t t, struct iommu_state *is, void **vaddr,
-    int flags, bus_dmamap_t *mapp)
+iommu_dvmamem_alloc(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
+    void **vaddr, int flags, bus_dmamap_t *mapp)
 {
 	int error;
 
@@ -513,62 +541,63 @@
 	 * XXX: This will break for 32 bit transfers on machines with more than
 	 * 16G (2 << 34 bytes) of memory.
 	 */
-	if ((error = sparc64_dmamem_alloc_map(t, mapp)) != 0)
+	if ((error = sparc64_dmamem_alloc_map(dt, mapp)) != 0)
 		return (error);
-	if ((*vaddr = malloc(t->maxsize, M_IOMMU,
+	if ((*vaddr = malloc(dt->maxsize, M_IOMMU,
 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
 		error = ENOMEM;
 		goto failm;
 	}
-	if ((error = iommu_dvma_valloc(t, is, *mapp)) != 0)
-		goto failv;
+	/*
+	 * Try to preallocate DVMA memory. If this fails, it is retried at load
+	 * time.
+	 */
+	iommu_dvma_valloc(dt, is, *mapp, IOMMU_SIZE_ROUNDUP(dt->maxsize));
 	return (0);
 
-failv:
-	free(*vaddr, M_IOMMU);
 failm:
-	sparc64_dmamem_free_map(t, *mapp);
+	sparc64_dmamem_free_map(dt, *mapp);
 	return (error);
 }
 
 void
-iommu_dvmamem_free(bus_dma_tag_t t, struct iommu_state *is, void *vaddr,
-    bus_dmamap_t map)
+iommu_dvmamem_free(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
+    void *vaddr, bus_dmamap_t map)
 {
 
-	iommu_dvma_vfree(t, map);
-	sparc64_dmamem_free_map(t, map);
+	iommu_dvma_vfree(map);
+	sparc64_dmamem_free_map(dt, map);
 	free(vaddr, M_IOMMU);
 }
 
 int
-iommu_dvmamap_create(bus_dma_tag_t t, struct iommu_state *is, int flags,
-    bus_dmamap_t *mapp)
+iommu_dvmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
+    int flags, bus_dmamap_t *mapp)
 {
 	int error;
 
-	if ((error = bus_dmamap_create(t->parent, flags, mapp)) != 0)
+	if ((error = sparc64_dmamap_create(pt->parent, dt, flags, mapp)) != 0)
 		return (error);
+	KASSERT((*mapp)->res == NULL,
+	    ("iommu_dvmamap_create: hierarchy botched"));
 	/*
-	 * XXX: If already allocated, skip (this can happen in tag hierarchies
-	 * where the parent is an iommu tag, too).
+	 * Preallocate DVMA memory; if this fails now, it is retried at load
+	 * time.
+	 * Clamp preallocation to BUS_SPACE_MAXSIZE. In some situations we can
+	 * handle more; that case is handled by reallocating at map load time.
 	 */
-	if ((*mapp)->res == NULL &&
-	    (error = iommu_dvma_valloc(t, is, *mapp)) != 0) {
-		bus_dmamap_destroy(t->parent, *mapp);
-		return (error);
-	}
+	iommu_dvma_valloc(dt, is, *mapp,
+	    ulmin(IOMMU_SIZE_ROUNDUP(dt->maxsize), BUS_SPACE_MAXSIZE));
 	return (0);
 }
 
 int
-iommu_dvmamap_destroy(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
+iommu_dvmamap_destroy(bus_dma_tag_t pt, bus_dma_tag_t dt,
+    struct iommu_state *is, bus_dmamap_t map)
 {
 
-	/* XXX: if already freed, skip. */
-	if (map->res != NULL)
-		iommu_dvma_vfree(t, map);
-	return (bus_dmamap_destroy(t->parent, map));
+	iommu_dvma_vfree(map);
+	return (sparc64_dmamap_destroy(pt->parent, dt, map));
 }
 
 #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
@@ -577,16 +606,17 @@
  * IOMMU DVMA operations, common to SBUS and PCI.
  */
 int
-iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
-    void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
-    int flags)
+iommu_dvmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
+    bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
+    void *cba, int flags)
 {
 #ifdef __GNUC__
-	bus_dma_segment_t sgs[t->nsegments];
+	bus_dma_segment_t sgs[dt->nsegments];
 #else
 	bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
 #endif
-	bus_size_t sgsize;
+	bus_dmamap_t tm;
+	bus_size_t sgsize, fsize, maxsize;
 	vm_offset_t curaddr;
 	u_long dvmaddr;
 	vm_offset_t vaddr;
@@ -597,15 +627,52 @@
 #ifdef DIAGNOSTIC
 		printf("iommu_dvmamap_load: map still in use\n");
 #endif
-		bus_dmamap_unload(t, map);
+		bus_dmamap_unload(dt, map);
 	}
-	if (buflen > t->maxsize) {
+	if (buflen > dt->maxsize) {
 		DPRINTF(IDB_BUSDMA,
 		    ("iommu_dvmamap_load(): error %d > %d -- "

>>> TRUNCATED FOR MAIL (1000 lines) <<<
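
The load-time half of the DVMA overcommit is in the part truncated above.  As
a purely illustrative sketch of the general scheme (not the actual code):
maps whose DVMA reservation is idle sit on iommu_maplruq, and when a new
reservation cannot be satisfied, idle reservations are reclaimed in LRU order
and the allocation is retried.  reserve_dvma() below is a made-up stand-in
for the real allocation helper:

	/* Illustrative only; sizing and error handling omitted. */
	while ((error = reserve_dvma(map, size)) == ENOMEM) {
		victim = STAILQ_FIRST(&iommu_maplruq);
		if (victim == NULL)
			break;			/* DVMA space really exhausted */
		iommu_dvma_vfree(victim);	/* reclaim its idle reservation */
	}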
