Commit 32b0ed3a authored by Ingo Molnar's avatar Ingo Molnar
Browse files

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

 into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - 'perf probe' improvements: (Masami Hiramatsu)

    - Support glob wildcards for function name
    - Support $params special probe argument: Collect all function arguments
    - Make --line checks validate C-style function name.
    - Add --no-inlines option to avoid searching inline functions

  - Introduce new 'perf bench futex' benchmark: 'wake-parallel', to
    measure parallel waker threads generating contention for kernel
    locks (hb->lock). (Davidlohr Bueso)

Bug fixes:

  - Improve 'perf top' to survive much longer on high core count machines,
    more work needed to refcount more data structures besides 'struct thread'
    and fix more races. (Arnaldo Carvalho de Melo)

Infrastructure changes:

  - Move barrier.h mb/rmb/wmb API from tools/perf/ to kernel like tools/arch/
    hierarchy. (Arnaldo Carvalho de Melo)

  - Borrow atomic.h from the kernel, initially the x86 implementations
    with a fallback to gcc intrinsics for the other arches, all the kernel
    like framework in place for doing arch specific implementations,
    preferably cloning what is in the kernel to the greatest extent
    possible. (Arnaldo Carvalho de Melo)

  - Protect the 'struct thread' lifetime with a reference counter,
    and protect data structures that contain its instances with
    a mutex. (Arnaldo Carvalho de Melo)

  - Disable libdw DWARF unwind when built with NO_DWARF (Naveen N. Rao)

Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents cb307113 76d40849
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
#ifndef __TOOLS_LINUX_ASM_ALPHA_BARRIER_H
#define __TOOLS_LINUX_ASM_ALPHA_BARRIER_H

/*
 * Memory barriers for Alpha, for use by tools/ code built in userspace.
 * Alpha's "mb" instruction is a full memory barrier, so it backs both
 * mb() and rmb(); "wmb" orders stores only.  The "memory" clobber stops
 * the compiler from reordering memory accesses across the barrier.
 */
#define mb()	__asm__ __volatile__("mb": : :"memory")
#define rmb()	__asm__ __volatile__("mb": : :"memory")
#define wmb()	__asm__ __volatile__("wmb": : :"memory")

#endif		/* __TOOLS_LINUX_ASM_ALPHA_BARRIER_H */
+12 −0
Original line number Diff line number Diff line
#ifndef _TOOLS_LINUX_ASM_ARM_BARRIER_H
#define _TOOLS_LINUX_ASM_ARM_BARRIER_H

/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 *
 * All three barriers call the same kernel-provided helper at the fixed
 * address 0xffff0fa0 (cast to a function pointer and invoked), so the
 * kernel can pick the strongest barrier the running CPU needs; there are
 * no separate read/write-only variants in userspace here.
 */
#define mb()		((void(*)(void))0xffff0fa0)()
#define wmb()		((void(*)(void))0xffff0fa0)()
#define rmb()		((void(*)(void))0xffff0fa0)()

#endif /* _TOOLS_LINUX_ASM_ARM_BARRIER_H */
+16 −0
Original line number Diff line number Diff line
#ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
#define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H

/*
 * From tools/perf/perf-sys.h, last modified in:
 * f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
 *
 * XXX: arch/arm64/include/asm/barrier.h in the kernel sources use dsb, is this
 * a case like for arm32 where we do things differently in userspace?
 */

#define mb()		asm volatile("dmb ish" ::: "memory")
#define wmb()		asm volatile("dmb ishst" ::: "memory")
#define rmb()		asm volatile("dmb ishld" ::: "memory")

#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
+48 −0
Original line number Diff line number Diff line
/*
 * Copied from the kernel sources to tools/:
 *
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _TOOLS_LINUX_ASM_IA64_BARRIER_H
#define _TOOLS_LINUX_ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */

/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
#define ia64_mf()       asm volatile ("mf" ::: "memory")

/*
 * ia64 exposes a single fence instruction ("mf"), so the full, read and
 * write barriers all expand to it.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */
+20 −0
Original line number Diff line number Diff line
#ifndef _TOOLS_LINUX_ASM_MIPS_BARRIER_H
#define _TOOLS_LINUX_ASM_MIPS_BARRIER_H
/*
 * FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
 * in c1e028ef40b8d6943b767028ba17d4f2ba020edb, more work needed to make it
 * more closely follow the Linux kernel arch/mips/include/asm/barrier.h file.
 * Probably when we continue work on tools/ Kconfig support to have all the
 * CONFIG_ needed for properly doing that.
 */
/*
 * Full barrier via the MIPS "sync" instruction.  The ".set mips2" /
 * ".set mips0" pair temporarily raises the assembler's ISA level so
 * "sync" assembles even when the build targets an older ISA, then
 * restores the original setting.
 */
#define mb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
/* No lighter-weight variants here: reads and writes reuse the full sync. */
#define wmb()	mb()
#define rmb()	mb()

#endif /* _TOOLS_LINUX_ASM_MIPS_BARRIER_H */
Loading