Commit 793423ff authored by Dave Airlie

Merge branch 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux into drm-next

- Updated register headers for GFX 8.1 for Stoney
- Add some new CZ revisions
- minor pageflip optimizations
- Fencing clean up
- Warning fix
- More fence cleanup
- oops fix
- Fiji fixes

* 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux: (29 commits)
  drm/amdgpu: group together common fence implementation
  drm/amdgpu: remove AMDGPU_FENCE_OWNER_MOVE
  drm/amdgpu: remove now unused fence functions
  drm/amdgpu: fix fence fallback check
  drm/amdgpu: fix stopping the scheduler timeout
  drm/amdgpu: cleanup on error in amdgpu_cs_ioctl()
  drm/amdgpu: update Fiji's Golden setting
  drm/amdgpu: update Fiji's rev id
  drm/amdgpu: extract common code in vi_common_early_init
  drm/amd/scheduler: don't oops on failure to load
  drm/amdgpu: don't oops on failure to load (v2)
  drm/amdgpu: don't VT switch on suspend
  drm/amdgpu: Make amdgpu_mn functions inline
  drm/amdgpu: remove amdgpu_fence_ref/unref
  drm/amdgpu: use common fence for sync
  drm/amdgpu: use the new fence_is_later
  drm/amdgpu: use common fences for VMID management v2
  drm/amdgpu: move ring_from_fence to common code
  drm/amdgpu: switch to common fence_wait_any_timeout v2
  drm/amdgpu: remove unneeded fence functions
  ...
parents bf248ca1 a95e2642
drivers/dma-buf/fence.c  +98 −0
@@ -397,6 +397,104 @@ out:
}
EXPORT_SYMBOL(fence_default_wait);

static bool
fence_test_signaled_any(struct fence **fences, uint32_t count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct fence *fence = fences[i];
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			return true;
	}
	return false;
}

/**
 * fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences:	[in]	array of fences to wait on
 * @count:	[in]	number of fences to wait on
 * @intr:	[in]	if true, do an interruptible wait
 * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronous waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_any_timeout(struct fence **fences, uint32_t count,
		       bool intr, signed long timeout)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (fence_is_signaled(fences[i]))
				return 1;

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct fence *fence = fences[i];

		if (fence->ops->wait != fence_default_wait) {
			ret = -EINVAL;
			goto fence_rm_cb;
		}

		cb[i].task = current;
		if (fence_add_callback(fence, &cb[i].base,
				       fence_default_wait_cb)) {
			/* This fence is already signaled */
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (fence_test_signaled_any(fences, count))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(fence_wait_any_timeout);

/**
 * fence_init - Initialize a custom fence.
 * @fence:	[in]	the fence to initialize
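
For context, an illustrative caller-side sketch (not part of this diff): a driver thread that wants to block until any one of a set of fences signals can now use the common helper instead of a driver-private wait-any. The wrapper name and the 100 ms timeout below are made up for illustration; as the kerneldoc above requires, the caller must hold a reference on every fence in the array.

#include <linux/fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_for_any_fence(struct fence **fences, uint32_t count)
{
	signed long r;

	/* Interruptible wait, 100 ms expressed in jiffies. */
	r = fence_wait_any_timeout(fences, count, true,
				   msecs_to_jiffies(100));
	if (r == 0)
		return -ETIMEDOUT;	/* timed out, nothing signaled */
	if (r < 0)
		return r;		/* -EINVAL, -ERESTARTSYS, -ENOMEM */
	return 0;			/* at least one fence signaled */
}
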
drivers/gpu/drm/amd/amdgpu/amdgpu.h  +6 −52
@@ -405,7 +405,6 @@ struct amdgpu_fence_driver {
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
@@ -447,57 +446,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

signed long amdgpu_fence_wait_any(struct fence **array,
				  uint32_t count,
				  bool intr,
				  signed long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);

bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);

static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
						      struct amdgpu_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
					   struct amdgpu_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	BUG_ON(a->ring != b->ring);

	return a->seq < b->seq;
}

int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
			   void *owner, struct amdgpu_fence **fence);

/*
 * TTM.
 */
@@ -708,7 +661,7 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
 */
struct amdgpu_sync {
	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
	struct fence		*sync_to[AMDGPU_MAX_RINGS];
	DECLARE_HASHTABLE(fences, 4);
	struct fence	        *last_vm_update;
};
@@ -974,7 +927,7 @@ struct amdgpu_vm_id {
	/* last flushed PD/PT update */
	struct fence	        *flushed_updates;
	/* last use of vmid */
	struct amdgpu_fence	*last_id_use;
	struct fence		*last_id_use;
};

struct amdgpu_vm {
@@ -1007,7 +960,7 @@ struct amdgpu_vm {
};

struct amdgpu_vm_manager {
	struct amdgpu_fence		*active[AMDGPU_NUM_VM];
	struct fence			*active[AMDGPU_NUM_VM];
	uint32_t			max_pfn;
	/* number of VMIDs */
	unsigned			nvm;
@@ -1235,6 +1188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);

/*
 * CS.
@@ -1758,11 +1712,11 @@ void amdgpu_test_syncing(struct amdgpu_device *adev);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif

/*
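
For context, an illustrative sketch (not part of this diff): the removed amdgpu_fence_later()/amdgpu_fence_is_earlier() helpers compared driver-private seqnos; with this series the same decision is made via the common fence_is_later() from <linux/fence.h>, which compares the seqnos of two fences belonging to the same context. The helper name below is hypothetical.

#include <linux/fence.h>

static struct fence *pick_later_fence(struct fence *a, struct fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	/* Both fences must come from the same fence context (same ring). */
	return fence_is_later(a, b) ? a : b;
}
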
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  +4 −2
@@ -876,8 +876,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		struct amdgpu_job *job;
		struct amdgpu_ring * ring =  parser->ibs->ring;
		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		if (!job) {
			r = -ENOMEM;
			goto out;
		}
		job->base.sched = &ring->sched;
		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
		job->adev = parser->adev;
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  +3 −0
@@ -69,6 +69,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  +1 −0
@@ -207,6 +207,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
	}

	info->par = rfbdev;
	info->skip_vt_switch = true;

	ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
	if (ret) {