	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm

+/* Branch to skip_label if the SPE version is less than the given version */
+.macro __spe_vers_imp skip_label, version, tmp
+	mrs	\tmp, id_aa64dfr0_el1
+	ubfx	\tmp, \tmp, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
+	cmp	\tmp, \version
+	b.lt	\skip_label
+.endm
+
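For reference, a call such as __spe_vers_imp .Lskip_spe_\@, #ID_AA64DFR0_EL1_PMSVer_IMP, x0 (used below) expands to roughly the following. This is an illustrative sketch of the macro expansion, not part of the commit:

	mrs	x0, id_aa64dfr0_el1				// read the debug feature ID register
	ubfx	x0, x0, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4	// extract the 4-bit PMSVer field
	cmp	x0, #ID_AA64DFR0_EL1_PMSVer_IMP			// compare against the required version
	b.lt	.Lskip_spe_\@					// skip if SPE is absent or too old

Note that \@ is only meaningful inside a macro body; each invocation substitutes a unique number, so every call site gets its own local skip label.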
.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
...
	csel	x2, xzr, x0, eq			// all PMU counters from EL1

	/* Statistical profiling */
-	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
-	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present
+	__spe_vers_imp .Lskip_spe_\@, #ID_AA64DFR0_EL1_PMSVer_IMP, x0 // Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
...

	mov	x0, xzr
	mov	x2, xzr
-	mrs	x1, id_aa64dfr0_el1
-	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
-	cmp	x1, #3
-	b.lt	.Lskip_spe_fgt_\@
+	/* If SPEv1p2 is implemented, */
+	__spe_vers_imp .Lskip_spe_fgt_\@, #ID_AA64DFR0_EL1_PMSVer_V1P2, x1
	/* Disable PMSNEVFR_EL1 read and write traps */
	orr	x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
	orr	x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK
...
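A note on the encodings (an assumption drawn from the ID_AA64DFR0_EL1.PMSVer field definition, not stated in the commit): the removed open-coded check compared PMSVer against the literal #3, which matches ID_AA64DFR0_EL1_PMSVer_V1P2, so the new macro call is behaviour-preserving. The named constants are assumed to follow the architectural field encoding:

	/* Illustrative PMSVer values only; names assumed from the generated sysreg constants */
	ID_AA64DFR0_EL1_PMSVer_NI	= 0	// SPE not implemented
	ID_AA64DFR0_EL1_PMSVer_IMP	= 1	// FEAT_SPE
	ID_AA64DFR0_EL1_PMSVer_V1P1	= 2	// FEAT_SPEv1p1
	ID_AA64DFR0_EL1_PMSVer_V1P2	= 3	// FEAT_SPEv1p2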