Commit fe5bb6b0 authored by Jack Steiner, committed by Linus Torvalds
Browse files

sgi-gru: misc GRU cleanup



Misc trivial GRU driver fixes:
	- fix long lines
	- eliminate extra whitespace
	- eliminate compiler warning
	- better validation of invalidate user parameters
	- bug fix for GRU TLB flush (not the cpu TLB flush)

These changes are all internal to the SGI GRU driver and have no effect
on the base kernel.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 66666e50
Loading
Loading
Loading
Loading
+12 −10
Original line number Diff line number Diff line
@@ -19,8 +19,11 @@
#ifndef __GRU_INSTRUCTIONS_H__
#define __GRU_INSTRUCTIONS_H__

#define gru_flush_cache_hook(p)
#define gru_emulator_wait_hook(p, w)
extern int gru_check_status_proc(void *cb);
extern int gru_wait_proc(void *cb);
extern void gru_wait_abort_proc(void *cb);



/*
 * Architecture dependent functions
@@ -29,7 +32,7 @@
#if defined(CONFIG_IA64)
#include <linux/compiler.h>
#include <asm/intrinsics.h>
#define __flush_cache(p)		ia64_fc(p)
#define __flush_cache(p)		ia64_fc((unsigned long)p)
/* Use volatile on IA64 to ensure ordering via st4.rel */
#define gru_ordered_store_int(p, v)					\
		do {							\
@@ -558,20 +561,19 @@ extern int gru_get_cb_exception_detail(void *cb,

#define GRU_EXC_STR_SIZE		256

extern int gru_check_status_proc(void *cb);
extern int gru_wait_proc(void *cb);
extern void gru_wait_abort_proc(void *cb);

/*
 * Control block definition for checking status
 */
struct gru_control_block_status {
	unsigned int	icmd		:1;
	unsigned int	unused1		:31;
	unsigned int	ima		:3;
	unsigned int	reserved0	:4;
	unsigned int	unused1		:24;
	unsigned int	unused2		:24;
	unsigned int	istatus		:2;
	unsigned int	isubstatus	:4;
	unsigned int	inused3		:2;
	unsigned int	unused3		:2;
};

/* Get CB status */
+12 −7
Original line number Diff line number Diff line
@@ -368,6 +368,7 @@ failupm:

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;
@@ -497,10 +498,8 @@ int gru_handle_user_call_os(unsigned long cb)
	if (!gts)
		return -EINVAL;

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;
	}

	/*
	 * If force_unload is set, the UPM TLB fault is phony. The task
@@ -508,6 +507,10 @@ int gru_handle_user_call_os(unsigned long cb)
	 * unload the context. The task will page fault and assign a new
	 * context.
	 */
	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
				gts->ts_blade != uv_numa_blade_id())
		gts->ts_force_unload = 1;

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_force_unload) {
@@ -541,8 +544,10 @@ int gru_get_exception_detail(unsigned long arg)
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru) {
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		prefetchw(cbe);/* Harmless on hardware, required for emulator */
@@ -609,7 +614,7 @@ int gru_user_flush_tlb(unsigned long arg)
	if (!gts)
		return -EINVAL;

	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len);
	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
	gru_unlock_gts(gts);

	return 0;
+15 −10
Original line number Diff line number Diff line
@@ -45,7 +45,8 @@
#include <asm/uv/uv_mmrs.h>

struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr, gru_end_paddr __read_mostly;
unsigned long gru_start_paddr __read_mostly;
unsigned long gru_end_paddr __read_mostly;
struct gru_stats_s gru_stats;

/* Guaranteed user available resources on each node */
@@ -295,7 +296,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
	for_each_online_node(nid) {
		bid = uv_node_to_blade_id(nid);
		pnode = uv_node_to_pnode(nid);
		if (gru_base[bid])
		if (bid < 0 || gru_base[bid])
			continue;
		page = alloc_pages_node(nid, GFP_KERNEL, order);
		if (!page)
@@ -312,7 +313,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
				chip++, gru++) {
			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
			gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip);
			gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
			cbrs = max(cbrs, n);
			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -370,26 +371,26 @@ static int __init gru_init(void)
	void *gru_start_vaddr;

	if (!is_uv_system())
		return 0;
		return -ENODEV;

#if defined CONFIG_IA64
	gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
	gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
				0x7fffffffffffUL;

#endif
	gru_start_vaddr = __va(gru_start_paddr);
	gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE;
	gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
	       gru_start_paddr, gru_end_paddr);
	irq = get_base_irq();
	for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
		ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
		/* TODO: fix irq handling on x86. For now ignore failures because
		/* TODO: fix irq handling on x86. For now ignore failure because
		 * interrupts are not required & not yet fully supported */
		if (ret) {
			printk("!!!WARNING: GRU ignoring request failure!!!\n");
			printk(KERN_WARNING
			       "!!!WARNING: GRU ignoring request failure!!!\n");
			ret = 0;
		}
		if (ret) {
@@ -469,7 +470,11 @@ struct vm_operations_struct gru_vm_ops = {
	.fault		= gru_fault,
};

#ifndef MODULE
fs_initcall(gru_init);
#else
module_init(gru_init);
#endif
module_exit(gru_exit);

module_param(gru_options, ulong, 0644);
+3 −3
Original line number Diff line number Diff line
@@ -432,8 +432,8 @@ static inline long gru_copy_handle(void *d, void *s)
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap,
				unsigned long length)
static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

@@ -773,8 +773,8 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;

again:
	preempt_disable();
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();
	if (gts->ts_gru) {
		if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
			STAT(migrated_nopfn_unload);
+8 −7
Original line number Diff line number Diff line
@@ -278,13 +278,12 @@ struct gru_stats_s {
/* Generate a GRU asid value from a GRU base asid & a virtual address. */
#if defined CONFIG_IA64
#define VADDR_HI_BIT		64
#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
#elif defined CONFIG_X86_64
#define VADDR_HI_BIT		48
#define GRUREGION(addr)		(0)		/* ZZZ could do better */
#else
#error "Unsupported architecture"
#endif
#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr)	((asid) + GRUREGION(addr))

/*------------------------------------------------------------------------------
@@ -297,12 +296,12 @@ struct gru_state;
 * This structure is pointed to from the mmstruct via the notifier pointer.
 * There is one of these per address space.
 */
struct gru_mm_tracker {
	unsigned int		mt_asid_gen;	/* ASID wrap count */
	int			mt_asid;	/* current base ASID for gru */
	unsigned short		mt_ctxbitmap;	/* bitmap of contexts using
struct gru_mm_tracker {				/* pack to reduce size */
	unsigned int		mt_asid_gen:24;	/* ASID wrap count */
	unsigned int		mt_asid:24;	/* current base ASID for gru */
	unsigned short		mt_ctxbitmap:16;/* bitmap of contexts using
						   asid */
};
} __attribute__ ((packed));

struct gru_mm_struct {
	struct mmu_notifier	ms_notifier;
@@ -359,6 +358,8 @@ struct gru_thread_state {
						   required for contest */
	unsigned char		ts_cbr_au_count;/* Number of CBR resources
						   required for contest */
	char			ts_blade;	/* If >= 0, migrate context if
						   ref from diferent blade */
	char			ts_force_unload;/* force context to be unloaded
						   after migration */
	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
Loading