Date:      Tue, 19 May 2015 15:25:48 +0000 (UTC)
From:      Ruslan Bukin <br@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r283112 - in head: lib/libpmc sys/arm/arm sys/arm64/arm64 sys/arm64/include sys/conf sys/dev/hwpmc sys/sys
Message-ID:  <201505191525.t4JFPm9e009810@svn.freebsd.org>

Author: br
Date: Tue May 19 15:25:47 2015
New Revision: 283112
URL: https://svnweb.freebsd.org/changeset/base/283112

Log:
  Add Performance Monitoring Counters support for AArch64.
  Both family-common and CPU-specific event counters are implemented.
  
  Supported CPUs: ARM Cortex A53/57/72.
  
  Reviewed by:	andrew, bz, emaste, gnn, jhb
  Sponsored by:	ARM Limited
  Differential Revision:	https://reviews.freebsd.org/D2555
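
  For context, a minimal libpmc(3) consumer along these lines should be able to
  exercise the new ARMV8 class once hwpmc(4) is loaded.  This is an illustrative
  sketch only: the "CPU_CYCLES" event spelling, the fixed CPU number and the file
  name are assumptions for the example, not names defined by this commit.

	#include <sys/types.h>

	#include <err.h>
	#include <pmc.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		pmc_id_t pmcid;
		pmc_value_t count;

		if (pmc_init() != 0)
			err(1, "pmc_init");
		/* System-scope counting PMC on CPU 0 (event name assumed). */
		if (pmc_allocate("CPU_CYCLES", PMC_MODE_SC, 0, 0, &pmcid) != 0)
			err(1, "pmc_allocate");
		if (pmc_start(pmcid) != 0)
			err(1, "pmc_start");
		sleep(1);
		if (pmc_read(pmcid, &count) != 0)
			err(1, "pmc_read");
		printf("cycles on cpu0: %ju\n", (uintmax_t)count);
		(void)pmc_stop(pmcid);
		(void)pmc_release(pmcid);
		return (0);
	}

  Build with "cc -o pmccount pmccount.c -lpmc" and run as root with the hwpmc
  module loaded; on a Cortex-A53/A57 this should allocate a counter from the
  ARMV8 class added here.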

Added:
  head/sys/dev/hwpmc/hwpmc_arm64.c   (contents, props changed)
  head/sys/dev/hwpmc/hwpmc_arm64.h   (contents, props changed)
  head/sys/dev/hwpmc/hwpmc_arm64_md.c   (contents, props changed)
Modified:
  head/lib/libpmc/libpmc.c
  head/sys/arm/arm/pmu.c
  head/sys/arm64/arm64/intr_machdep.c
  head/sys/arm64/include/armreg.h
  head/sys/arm64/include/pmc_mdep.h
  head/sys/conf/files.arm64
  head/sys/dev/hwpmc/pmc_events.h
  head/sys/sys/pmc.h

Modified: head/lib/libpmc/libpmc.c
==============================================================================
--- head/lib/libpmc/libpmc.c	Tue May 19 14:49:31 2015	(r283111)
+++ head/lib/libpmc/libpmc.c	Tue May 19 15:25:47 2015	(r283112)
@@ -82,6 +82,10 @@ static int xscale_allocate_pmc(enum pmc_
 static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
     struct pmc_op_pmcallocate *_pmc_config);
 #endif
+#if defined(__aarch64__)
+static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+    struct pmc_op_pmcallocate *_pmc_config);
+#endif
 #if defined(__mips__)
 static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
 			     struct pmc_op_pmcallocate *_pmc_config);
@@ -158,6 +162,7 @@ PMC_CLASSDEP_TABLE(p5, P5);
 PMC_CLASSDEP_TABLE(p6, P6);
 PMC_CLASSDEP_TABLE(xscale, XSCALE);
 PMC_CLASSDEP_TABLE(armv7, ARMV7);
+PMC_CLASSDEP_TABLE(armv8, ARMV8);
 PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
 PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
 PMC_CLASSDEP_TABLE(octeon, OCTEON);
@@ -263,6 +268,16 @@ static const struct pmc_event_descr west
 	__PMC_EV_ALIAS_WESTMEREUC()
 };
 
+static const struct pmc_event_descr cortex_a53_event_table[] = 
+{
+	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
+};
+
+static const struct pmc_event_descr cortex_a57_event_table[] = 
+{
+	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
+};
+
 /*
  * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
  *
@@ -294,6 +309,8 @@ PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, P
 PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
 PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
 PMC_MDEP_TABLE(armv7, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
+PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
+PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
 PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
 PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
 PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
@@ -362,6 +379,10 @@ PMC_CLASS_TABLE_DESC(xscale, XSCALE, xsc
 #endif
 PMC_CLASS_TABLE_DESC(armv7, ARMV7, armv7, armv7);
 #endif
+#if	defined(__aarch64__)
+PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
+PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
+#endif
 #if defined(__mips__)
 PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
 PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
@@ -2429,6 +2450,26 @@ armv7_allocate_pmc(enum pmc_event pe, ch
 }
 #endif
 
+#if	defined(__aarch64__)
+static struct pmc_event_alias cortex_a53_aliases[] = {
+	EV_ALIAS(NULL, NULL)
+};
+static struct pmc_event_alias cortex_a57_aliases[] = {
+	EV_ALIAS(NULL, NULL)
+};
+static int
+arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
+    struct pmc_op_pmcallocate *pmc_config __unused)
+{
+	switch (pe) {
+	default:
+		break;
+	}
+
+	return (0);
+}
+#endif
+
 #if defined(__mips__)
 
 static struct pmc_event_alias mips24k_aliases[] = {
@@ -2938,6 +2979,19 @@ pmc_event_names_of_class(enum pmc_class 
 		ev = armv7_event_table;
 		count = PMC_EVENT_TABLE_SIZE(armv7);
 		break;
+	case PMC_CLASS_ARMV8:
+		switch (cpu_info.pm_cputype) {
+		default:
+		case PMC_CPU_ARMV8_CORTEX_A53:
+			ev = cortex_a53_event_table;
+			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
+			break;
+		case PMC_CPU_ARMV8_CORTEX_A57:
+			ev = cortex_a57_event_table;
+			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
+			break;
+		}
+		break;
 	case PMC_CLASS_MIPS24K:
 		ev = mips24k_event_table;
 		count = PMC_EVENT_TABLE_SIZE(mips24k);
@@ -3235,6 +3289,16 @@ pmc_init(void)
 		pmc_class_table[n] = &armv7_class_table_descr;
 		break;
 #endif
+#if defined(__aarch64__)
+	case PMC_CPU_ARMV8_CORTEX_A53:
+		PMC_MDEP_INIT(cortex_a53);
+		pmc_class_table[n] = &cortex_a53_class_table_descr;
+		break;
+	case PMC_CPU_ARMV8_CORTEX_A57:
+		PMC_MDEP_INIT(cortex_a57);
+		pmc_class_table[n] = &cortex_a57_class_table_descr;
+		break;
+#endif
 #if defined(__mips__)
 	case PMC_CPU_MIPS_24K:
 		PMC_MDEP_INIT(mips24k);
@@ -3446,6 +3510,19 @@ _pmc_name_of_event(enum pmc_event pe, en
 	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
 		ev = armv7_event_table;
 		evfence = armv7_event_table + PMC_EVENT_TABLE_SIZE(armv7);
+	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
+		switch (cpu) {
+		case PMC_CPU_ARMV8_CORTEX_A53:
+			ev = cortex_a53_event_table;
+			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
+			break;
+		case PMC_CPU_ARMV8_CORTEX_A57:
+			ev = cortex_a57_event_table;
+			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
+			break;
+		default:	/* Unknown CPU type. */
+			break;
+		}
 	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
 		ev = mips24k_event_table;
 		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);

Modified: head/sys/arm/arm/pmu.c
==============================================================================
--- head/sys/arm/arm/pmu.c	Tue May 19 14:49:31 2015	(r283111)
+++ head/sys/arm/arm/pmu.c	Tue May 19 15:25:47 2015	(r283112)
@@ -58,13 +58,16 @@ __FBSDID("$FreeBSD$");
 #include <machine/cpu.h>
 #include <machine/intr.h>
 
+#define	MAX_RLEN	8
+
 struct pmu_softc {
-	struct resource		*res[1];
+	struct resource		*res[MAX_RLEN];
 	device_t		dev;
-	void			*ih;
+	void			*ih[MAX_RLEN];
 };
 
 static struct ofw_compat_data compat_data[] = {
+	{"arm,armv8-pmuv3",	1},
 	{"arm,cortex-a17-pmu",	1},
 	{"arm,cortex-a15-pmu",	1},
 	{"arm,cortex-a12-pmu",	1},
@@ -81,6 +84,13 @@ static struct ofw_compat_data compat_dat
 
 static struct resource_spec pmu_spec[] = {
 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
+	{ SYS_RES_IRQ,		1,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		2,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		3,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		4,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		5,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		6,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		7,	RF_ACTIVE | RF_OPTIONAL },
 	{ -1, 0 }
 };
 
@@ -119,6 +129,7 @@ pmu_attach(device_t dev)
 {
 	struct pmu_softc *sc;
 	int err;
+	int i;
 
 	sc = device_get_softc(dev);
 	sc->dev = dev;
@@ -129,11 +140,16 @@ pmu_attach(device_t dev)
 	}
 
 	/* Setup interrupt handler */
-	err = bus_setup_intr(dev, sc->res[0], INTR_MPSAFE | INTR_TYPE_MISC,
-	    pmu_intr, NULL, NULL, &sc->ih);
-	if (err) {
-		device_printf(dev, "Unable to setup interrupt handler.\n");
-		return (ENXIO);
+	for (i = 0; i < MAX_RLEN; i++) {
+		if (sc->res[i] == NULL)
+			break;
+
+		err = bus_setup_intr(dev, sc->res[i], INTR_MPSAFE | INTR_TYPE_MISC,
+		    pmu_intr, NULL, NULL, &sc->ih[i]);
+		if (err) {
+			device_printf(dev, "Unable to setup interrupt handler.\n");
+			return (ENXIO);
+		}
 	}
 
 	return (0);

Modified: head/sys/arm64/arm64/intr_machdep.c
==============================================================================
--- head/sys/arm64/arm64/intr_machdep.c	Tue May 19 14:49:31 2015	(r283111)
+++ head/sys/arm64/arm64/intr_machdep.c	Tue May 19 15:25:47 2015	(r283112)
@@ -430,6 +430,10 @@ stray:
 
 	if (intr != NULL)
 		PIC_MASK(root_pic, intr->i_hw_irq);
+#ifdef HWPMC_HOOKS
+	if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
+		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
+#endif
 }
 
 void

Modified: head/sys/arm64/include/armreg.h
==============================================================================
--- head/sys/arm64/include/armreg.h	Tue May 19 14:49:31 2015	(r283111)
+++ head/sys/arm64/include/armreg.h	Tue May 19 15:25:47 2015	(r283112)
@@ -212,4 +212,22 @@
 #define	DBG_MDSCR_KDE	(0x1 << 13)
 #define	DBG_MDSCR_MDE	(0x1 << 15)
 
+/* Performance Monitoring Counters */
+#define	PMCR_E		(1 << 0) /* Enable all counters */
+#define	PMCR_P		(1 << 1) /* Reset all counters */
+#define	PMCR_C		(1 << 2) /* Clock counter reset */
+#define	PMCR_D		(1 << 3) /* CNTR counts every 64 clk cycles */
+#define	PMCR_X		(1 << 4) /* Export to ext. monitoring (ETM) */
+#define	PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
+#define	PMCR_LC		(1 << 6) /* Long cycle count enable */
+#define	PMCR_IMP_SHIFT	24 /* Implementer code */
+#define	PMCR_IMP_MASK	(0xff << PMCR_IMP_SHIFT)
+#define	PMCR_IDCODE_SHIFT	16 /* Identification code */
+#define	PMCR_IDCODE_MASK	(0xff << PMCR_IDCODE_SHIFT)
+#define	 PMCR_IDCODE_CORTEX_A57	0x01
+#define	 PMCR_IDCODE_CORTEX_A72	0x02
+#define	 PMCR_IDCODE_CORTEX_A53	0x03
+#define	PMCR_N_SHIFT	11       /* Number of counters implemented */
+#define	PMCR_N_MASK	(0x1f << PMCR_N_SHIFT)
+
 #endif /* !_MACHINE_ARMREG_H_ */

Modified: head/sys/arm64/include/pmc_mdep.h
==============================================================================
--- head/sys/arm64/include/pmc_mdep.h	Tue May 19 14:49:31 2015	(r283111)
+++ head/sys/arm64/include/pmc_mdep.h	Tue May 19 15:25:47 2015	(r283112)
@@ -29,6 +29,14 @@
 #ifndef _MACHINE_PMC_MDEP_H_
 #define	_MACHINE_PMC_MDEP_H_
 
+#define	PMC_MDEP_CLASS_INDEX_ARMV8	1
+/*
+ * On the ARMv8 platform we support the following PMCs.
+ *
+ * ARMV8	ARM Cortex-A53/57/72 processors
+ */
+#include <dev/hwpmc/hwpmc_arm64.h>
+
 union pmc_md_op_pmcallocate {
 	uint64_t		__pad[4];
 };
@@ -39,12 +47,21 @@ union pmc_md_op_pmcallocate {
 
 #ifdef	_KERNEL
 union pmc_md_pmc {
+	struct pmc_md_arm64_pmc		pm_arm64;
 };
 
-#define	PMC_TRAPFRAME_TO_PC(TF)	(0)	/* Stubs */
-#define	PMC_TRAPFRAME_TO_FP(TF)	(0)
-#define	PMC_TRAPFRAME_TO_SP(TF)	(0)
-
+#define	PMC_IN_KERNEL_STACK(S,START,END)		\
+	((S) >= (START) && (S) < (END))
+#define	PMC_IN_KERNEL(va)	INKERNEL((va))
+#define	PMC_IN_USERSPACE(va) ((va) <= VM_MAXUSER_ADDRESS)
+#define	PMC_TRAPFRAME_TO_PC(TF)		((TF)->tf_lr)
+#define	PMC_TRAPFRAME_TO_FP(TF)		((TF)->tf_x[29])
+
+/*
+ * Prototypes
+ */
+struct pmc_mdep *pmc_arm64_initialize(void);
+void	pmc_arm64_finalize(struct pmc_mdep *_md);
 #endif /* _KERNEL */
 
 #endif /* !_MACHINE_PMC_MDEP_H_ */

Modified: head/sys/conf/files.arm64
==============================================================================
--- head/sys/conf/files.arm64	Tue May 19 14:49:31 2015	(r283111)
+++ head/sys/conf/files.arm64	Tue May 19 15:25:47 2015	(r283112)
@@ -1,6 +1,7 @@
 # $FreeBSD$
 arm/arm/devmap.c		standard
 arm/arm/generic_timer.c		standard
+arm/arm/pmu.c			standard
 arm64/arm64/autoconf.c		standard
 arm64/arm64/bcopy.c		standard
 arm64/arm64/bus_machdep.c	standard
@@ -40,6 +41,8 @@ arm64/arm64/uio_machdep.c	standard
 arm64/arm64/vfp.c		standard
 arm64/arm64/vm_machdep.c	standard
 dev/fdt/fdt_arm64.c		optional	fdt
+dev/hwpmc/hwpmc_arm64.c		optional	hwpmc
+dev/hwpmc/hwpmc_arm64_md.c	optional	hwpmc
 dev/ofw/ofw_cpu.c		optional	fdt
 dev/psci/psci.c			optional	psci
 dev/psci/psci_arm64.S		optional	psci

Added: head/sys/dev/hwpmc/hwpmc_arm64.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/dev/hwpmc/hwpmc_arm64.c	Tue May 19 15:25:47 2015	(r283112)
@@ -0,0 +1,544 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory with support from ARM Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+
+#include <machine/pmc_mdep.h>
+#include <machine/cpu.h>
+
+static int arm64_npmcs;
+
+struct arm64_event_code_map {
+	enum pmc_event	pe_ev;
+	uint8_t		pe_code;
+};
+
+/*
+ * Per-processor information.
+ */
+struct arm64_cpu {
+	struct pmc_hw   *pc_arm64pmcs;
+};
+
+static struct arm64_cpu **arm64_pcpu;
+
+/*
+ * Interrupt Enable Set Register
+ */
+static __inline void
+arm64_interrupt_enable(uint32_t pmc)
+{
+	uint32_t reg;
+
+	reg = (1 << pmc);
+	WRITE_SPECIALREG(PMINTENSET_EL1, reg);
+
+	isb();
+}
+
+/*
+ * Interrupt Enable Clear Register
+ */
+static __inline void
+arm64_interrupt_disable(uint32_t pmc)
+{
+	uint32_t reg;
+
+	reg = (1 << pmc);
+	WRITE_SPECIALREG(PMINTENCLR_EL1, reg);
+
+	isb();
+}
+
+/*
+ * Counter Set Enable Register
+ */
+static __inline void
+arm64_counter_enable(unsigned int pmc)
+{
+	uint32_t reg;
+
+	reg = (1 << pmc);
+	WRITE_SPECIALREG(PMCNTENSET_EL0, reg);
+
+	isb();
+}
+
+/*
+ * Counter Clear Enable Register
+ */
+static __inline void
+arm64_counter_disable(unsigned int pmc)
+{
+	uint32_t reg;
+
+	reg = (1 << pmc);
+	WRITE_SPECIALREG(PMCNTENCLR_EL0, reg);
+
+	isb();
+}
+
+/*
+ * Performance Monitors Control Register
+ */
+static uint32_t
+arm64_pmcr_read(void)
+{
+	uint32_t reg;
+
+	reg = READ_SPECIALREG(PMCR_EL0);
+
+	return (reg);
+}
+
+static void
+arm64_pmcr_write(uint32_t reg)
+{
+
+	WRITE_SPECIALREG(PMCR_EL0, reg);
+
+	isb();
+}
+
+/*
+ * Performance Count Register N
+ */
+static uint32_t
+arm64_pmcn_read(unsigned int pmc)
+{
+
+	KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
+
+	WRITE_SPECIALREG(PMSELR_EL0, pmc);
+
+	isb();
+
+	return (READ_SPECIALREG(PMXEVCNTR_EL0));
+}
+
+static void
+arm64_pmcn_write(unsigned int pmc, uint32_t reg)
+{
+
+	KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
+
+	WRITE_SPECIALREG(PMSELR_EL0, pmc);
+	WRITE_SPECIALREG(PMXEVCNTR_EL0, reg);
+
+	isb();
+}
+
+static int
+arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
+  const struct pmc_op_pmcallocate *a)
+{
+	uint32_t caps, config;
+	struct arm64_cpu *pac;
+	enum pmc_event pe;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < arm64_npmcs,
+	    ("[arm64,%d] illegal row index %d", __LINE__, ri));
+
+	pac = arm64_pcpu[cpu];
+
+	caps = a->pm_caps;
+	if (a->pm_class != PMC_CLASS_ARMV8) {
+		return (EINVAL);
+	}
+	pe = a->pm_ev;
+
+	config = (pe & EVENT_ID_MASK);
+	pm->pm_md.pm_arm64.pm_arm64_evsel = config;
+
+	PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%x", ri, config);
+
+	return 0;
+}
+
+
+static int
+arm64_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+	pmc_value_t tmp;
+	struct pmc *pm;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < arm64_npmcs,
+	    ("[arm64,%d] illegal row index %d", __LINE__, ri));
+
+	pm  = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+
+	tmp = arm64_pmcn_read(ri);
+
+	PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);
+	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+		*v = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+	else
+		*v = tmp;
+
+	return 0;
+}
+
+static int
+arm64_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+	struct pmc *pm;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < arm64_npmcs,
+	    ("[arm64,%d] illegal row-index %d", __LINE__, ri));
+
+	pm  = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+
+	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+		v = ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+	PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+	arm64_pmcn_write(ri, v);
+
+	return 0;
+}
+
+static int
+arm64_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+	struct pmc_hw *phw;
+
+	PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < arm64_npmcs,
+	    ("[arm64,%d] illegal row-index %d", __LINE__, ri));
+
+	phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+
+	KASSERT(pm == NULL || phw->phw_pmc == NULL,
+	    ("[arm64,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+	    __LINE__, pm, phw->phw_pmc));
+
+	phw->phw_pmc = pm;
+
+	return 0;
+}
+
+static int
+arm64_start_pmc(int cpu, int ri)
+{
+	struct pmc_hw *phw;
+	uint32_t config;
+	struct pmc *pm;
+
+	phw    = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+	pm     = phw->phw_pmc;
+	config = pm->pm_md.pm_arm64.pm_arm64_evsel;
+
+	/*
+	 * Configure the event selection.
+	 */
+	WRITE_SPECIALREG(PMSELR_EL0, ri);
+	WRITE_SPECIALREG(PMXEVTYPER_EL0, config);
+
+	isb();
+
+	/*
+	 * Enable the PMC.
+	 */
+	arm64_interrupt_enable(ri);
+	arm64_counter_enable(ri);
+
+	return 0;
+}
+
+static int
+arm64_stop_pmc(int cpu, int ri)
+{
+	struct pmc_hw *phw;
+	struct pmc *pm;
+
+	phw    = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+	pm     = phw->phw_pmc;
+
+	/*
+	 * Disable the PMCs.
+	 */
+	arm64_counter_disable(ri);
+	arm64_interrupt_disable(ri);
+
+	return 0;
+}
+
+static int
+arm64_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+	struct pmc_hw *phw;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < arm64_npmcs,
+	    ("[arm64,%d] illegal row-index %d", __LINE__, ri));
+
+	phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+	KASSERT(phw->phw_pmc == NULL,
+	    ("[arm64,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+	return 0;
+}
+
+static int
+arm64_intr(int cpu, struct trapframe *tf)
+{
+	struct arm64_cpu *pc;
+	int retval, ri;
+	struct pmc *pm;
+	int error;
+	int reg;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d] CPU %d out of range", __LINE__, cpu));
+
+	retval = 0;
+	pc = arm64_pcpu[cpu];
+
+	for (ri = 0; ri < arm64_npmcs; ri++) {
+		pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+		if (pm == NULL)
+			continue;
+		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+			continue;
+
+		/* Check if counter is overflowed */
+		reg = (1 << ri);
+		if ((READ_SPECIALREG(PMOVSCLR_EL0) & reg) == 0)
+			continue;
+		/* Clear Overflow Flag */
+		WRITE_SPECIALREG(PMOVSCLR_EL0, reg);
+
+		isb();
+
+		retval = 1; /* Found an interrupting PMC. */
+		if (pm->pm_state != PMC_STATE_RUNNING)
+			continue;
+
+		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+		    TRAPF_USERMODE(tf));
+		if (error)
+			arm64_stop_pmc(cpu, ri);
+
+		/* Reload sampling count */
+		arm64_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
+	}
+
+	return (retval);
+}
+
+static int
+arm64_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+	char arm64_name[PMC_NAME_MAX];
+	struct pmc_hw *phw;
+	int error;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d], illegal CPU %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < arm64_npmcs,
+	    ("[arm64,%d] row-index %d out of range", __LINE__, ri));
+
+	phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+	snprintf(arm64_name, sizeof(arm64_name), "ARMV8-%d", ri);
+	if ((error = copystr(arm64_name, pi->pm_name, PMC_NAME_MAX,
+	    NULL)) != 0)
+		return (error);
+	pi->pm_class = PMC_CLASS_ARMV8;
+	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+		pi->pm_enabled = TRUE;
+		*ppmc = phw->phw_pmc;
+	} else {
+		pi->pm_enabled = FALSE;
+		*ppmc = NULL;
+	}
+
+	return (0);
+}
+
+static int
+arm64_get_config(int cpu, int ri, struct pmc **ppm)
+{
+
+	*ppm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+
+	return (0);
+}
+
+/*
+ * XXX don't know what we should do here.
+ */
+static int
+arm64_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+
+	return (0);
+}
+
+static int
+arm64_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+
+	return (0);
+}
+
+static int
+arm64_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+	struct arm64_cpu *pac;
+	struct pmc_hw  *phw;
+	struct pmc_cpu *pc;
+	uint64_t pmcr;
+	int first_ri;
+	int i;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[arm64,%d] wrong cpu number %d", __LINE__, cpu));
+	PMCDBG1(MDP, INI, 1, "arm64-init cpu=%d", cpu);
+
+	arm64_pcpu[cpu] = pac = malloc(sizeof(struct arm64_cpu), M_PMC,
+	    M_WAITOK | M_ZERO);
+
+	pac->pc_arm64pmcs = malloc(sizeof(struct pmc_hw) * arm64_npmcs,
+	    M_PMC, M_WAITOK | M_ZERO);
+	pc = pmc_pcpu[cpu];
+	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8].pcd_ri;
+	KASSERT(pc != NULL, ("[arm64,%d] NULL per-cpu pointer", __LINE__));
+
+	for (i = 0, phw = pac->pc_arm64pmcs; i < arm64_npmcs; i++, phw++) {
+		phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
+		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+		phw->phw_pmc      = NULL;
+		pc->pc_hwpmcs[i + first_ri] = phw;
+	}
+
+	/* Enable unit */
+	pmcr = arm64_pmcr_read();
+	pmcr |= PMCR_E;
+	arm64_pmcr_write(pmcr);
+
+	return (0);
+}
+
+static int
+arm64_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+	uint32_t pmcr;
+
+	pmcr = arm64_pmcr_read();
+	pmcr &= ~PMCR_E;
+	arm64_pmcr_write(pmcr);
+
+	return (0);
+}
+
+struct pmc_mdep *
+pmc_arm64_initialize()
+{
+	struct pmc_mdep *pmc_mdep;
+	struct pmc_classdep *pcd;
+	int idcode;
+	int reg;
+
+	reg = arm64_pmcr_read();
+	arm64_npmcs = (reg & PMCR_N_MASK) >> PMCR_N_SHIFT;
+	idcode = (reg & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;
+
+	PMCDBG1(MDP, INI, 1, "arm64-init npmcs=%d", arm64_npmcs);
+
+	/*
+	 * Allocate space for pointers to PMC HW descriptors and for
+	 * the MDEP structure used by MI code.
+	 */
+	arm64_pcpu = malloc(sizeof(struct arm64_cpu *) * pmc_cpu_max(),
+		M_PMC, M_WAITOK | M_ZERO);
+
+	/* Just one class */
+	pmc_mdep = pmc_mdep_alloc(1);
+
+	switch (idcode) {
+	case PMCR_IDCODE_CORTEX_A57:
+	case PMCR_IDCODE_CORTEX_A72:
+		pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A57;
+		break;
+	default:
+	case PMCR_IDCODE_CORTEX_A53:
+		pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A53;
+		break;
+	}
+
+	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8];
+	pcd->pcd_caps  = ARMV8_PMC_CAPS;
+	pcd->pcd_class = PMC_CLASS_ARMV8;
+	pcd->pcd_num   = arm64_npmcs;
+	pcd->pcd_ri    = pmc_mdep->pmd_npmc;
+	pcd->pcd_width = 32;
+
+	pcd->pcd_allocate_pmc   = arm64_allocate_pmc;
+	pcd->pcd_config_pmc     = arm64_config_pmc;
+	pcd->pcd_pcpu_fini      = arm64_pcpu_fini;
+	pcd->pcd_pcpu_init      = arm64_pcpu_init;
+	pcd->pcd_describe       = arm64_describe;
+	pcd->pcd_get_config     = arm64_get_config;
+	pcd->pcd_read_pmc       = arm64_read_pmc;
+	pcd->pcd_release_pmc    = arm64_release_pmc;
+	pcd->pcd_start_pmc      = arm64_start_pmc;
+	pcd->pcd_stop_pmc       = arm64_stop_pmc;
+	pcd->pcd_write_pmc      = arm64_write_pmc;
+
+	pmc_mdep->pmd_intr       = arm64_intr;
+	pmc_mdep->pmd_switch_in  = arm64_switch_in;
+	pmc_mdep->pmd_switch_out = arm64_switch_out;
+
+	pmc_mdep->pmd_npmc   += arm64_npmcs;
+
+	return (pmc_mdep);
+}
+
+void
+pmc_arm64_finalize(struct pmc_mdep *md)
+{
+
+}

Added: head/sys/dev/hwpmc/hwpmc_arm64.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/dev/hwpmc/hwpmc_arm64.h	Tue May 19 15:25:47 2015	(r283112)
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory with support from ARM Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_ARMV8_H_
+#define _DEV_HWPMC_ARMV8_H_
+
+#define	ARMV8_PMC_CAPS		(PMC_CAP_INTERRUPT | PMC_CAP_USER |	\
+				 PMC_CAP_SYSTEM | PMC_CAP_EDGE |	\
+				 PMC_CAP_THRESHOLD | PMC_CAP_READ |	\
+				 PMC_CAP_WRITE | PMC_CAP_INVERT |	\
+				 PMC_CAP_QUALIFIER)
+
+#define	ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE(R)	(-(R))
+#define	ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(P)	(-(P))
+#define	EVENT_ID_MASK	0xFF
+
+#ifdef _KERNEL
+/* MD extension for 'struct pmc' */
+struct pmc_md_arm64_pmc {
+	uint32_t	pm_arm64_evsel;
+};
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_ARMV8_H_ */
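
The ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE()/ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT()
pair above relies on two's-complement wrap-around of the 32-bit event counters:
a sampling PMC with reload count R is started at -R, so it overflows, raising
the interrupt serviced by arm64_intr(), after exactly R increments.  A small
stand-alone sketch of the arithmetic (illustrative only, not part of the
commit):

	#include <inttypes.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint32_t reload = 4096;
		/* What the reload macro would program into the counter (-R). */
		uint32_t start = (uint32_t)-reload;	/* 0xfffff000 */
		/* Increments until the 32-bit counter wraps back to zero. */
		uint32_t to_overflow = 0U - start;

		printf("start=0x%08" PRIx32 ", increments to overflow=%" PRIu32 "\n",
		    start, to_overflow);
		return (0);
	}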

Added: head/sys/dev/hwpmc/hwpmc_arm64_md.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/dev/hwpmc/hwpmc_arm64_md.c	Tue May 19 15:25:47 2015	(r283112)
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory with support from ARM Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/stack.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+struct pmc_mdep *
+pmc_md_initialize()
+{
+
+	return (pmc_arm64_initialize());
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+
+	pmc_arm64_finalize(md);
+}
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
+    struct trapframe *tf)
+{
+	uintptr_t pc, r, stackstart, stackend, fp;
+	struct thread *td;
+	int count;
+
+	KASSERT(TRAPF_USERMODE(tf) == 0,("[arm,%d] not a kernel backtrace",
+	    __LINE__));
+
+	td = curthread;
+	pc = PMC_TRAPFRAME_TO_PC(tf);
+	*cc++ = pc;
+
+	if (maxsamples <= 1)
+		return (1);
+
+	stackstart = (uintptr_t) td->td_kstack;
+	stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
+	fp = PMC_TRAPFRAME_TO_FP(tf);
+
+	if (!PMC_IN_KERNEL(pc) ||
+	    !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
+		return (1);
+
+	for (count = 1; count < maxsamples; count++) {
+		/* Use saved lr as pc. */
+		r = fp + sizeof(uintptr_t);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


