Commit 71724f70 authored by Chris Wilson

drm/mm: Use helpers for drm_mm_node booleans



In preparation for rearranging the booleans into a flags field, ensure
all the current users are using the inline helpers and not directly
accessing the members.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003210100.22250-3-chris@chris-wilson.co.uk
parent 9c98f021
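
For reference, the accessor pattern this patch standardises on is the existing inline helper style from include/drm/drm_mm.h. The snippet below is an illustrative sketch (assuming the pre-flags layout in which struct drm_mm_node still carries an `allocated` boolean), not a verbatim copy of the header:

/* Sketch of the accessor style callers are switched to: the helper simply
 * reads the boolean member, so a later patch can retarget it at a flags
 * field without touching any of the call sites converted here.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}

A matching drm_mm_node_scanned_block() helper for the scan-state boolean is introduced locally in drm_mm.c by the first hunk below.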
drivers/gpu/drm/drm_mm.c +12 −7
@@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 
 	node->__subtree_last = LAST(node);
 
-	if (hole_node->allocated) {
+	if (drm_mm_node_allocated(hole_node)) {
 		rb = &hole_node->rb;
 		while (rb) {
 			parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
+static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+	return node->scanned_block;
+}
+
 /**
  * drm_mm_remove_node - Remove a memory node from the allocator.
  * @node: drm_mm_node to remove
@@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 
-	DRM_MM_BUG_ON(!node->allocated);
-	DRM_MM_BUG_ON(node->scanned_block);
+	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
 
 	prev_node = list_prev_entry(node, node_list);
 
@@ -605,7 +610,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
 	struct drm_mm *mm = old->mm;
 
-	DRM_MM_BUG_ON(!old->allocated);
+	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
 
 	*new = *old;
 
@@ -731,8 +736,8 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	u64 adj_start, adj_end;
 
 	DRM_MM_BUG_ON(node->mm != mm);
-	DRM_MM_BUG_ON(!node->allocated);
-	DRM_MM_BUG_ON(node->scanned_block);
+	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
 	node->scanned_block = true;
 	mm->scan_active++;
 
@@ -818,7 +823,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
 	struct drm_mm_node *prev_node;
 
 	DRM_MM_BUG_ON(node->mm != scan->mm);
-	DRM_MM_BUG_ON(!node->scanned_block);
+	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
 	node->scanned_block = false;
 
 	DRM_MM_BUG_ON(!node->mm->scan_active);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +2 −2
@@ -963,7 +963,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
 
-		if (cache->node.allocated) {
+		if (drm_mm_node_allocated(&cache->node)) {
 			ggtt->vm.clear_range(&ggtt->vm,
 					     cache->node.start,
 					     cache->node.size);
@@ -1056,7 +1056,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	}
 
 	offset = cache->node.start;
-	if (cache->node.allocated) {
+	if (drm_mm_node_allocated(&cache->node)) {
 		ggtt->vm.insert_page(&ggtt->vm,
 				     i915_gem_object_get_dma_address(obj, page),
 				     offset, I915_CACHE_NONE, 0);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +1 −1
@@ -400,7 +400,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
 {
 	struct drm_mm_node *node = &ggtt->uc_fw;
 
-	GEM_BUG_ON(!node->allocated);
+	GEM_BUG_ON(!drm_mm_node_allocated(node));
 	GEM_BUG_ON(upper_32_bits(node->start));
 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
 
drivers/gpu/drm/i915/i915_gem.c +6 −6
@@ -356,7 +356,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
 			goto out_unlock;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
 	}
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		unsigned page_offset = offset_in_page(offset);
 		unsigned page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
 			ggtt->vm.insert_page(&ggtt->vm,
 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
 					     node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
 	mutex_lock(&i915->drm.struct_mutex);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
@@ -566,7 +566,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
 			goto out_rpm;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
 	}
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		unsigned int page_offset = offset_in_page(offset);
 		unsigned int page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
 			/* flush the write before we modify the GGTT */
 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 			ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
drivers/gpu/drm/i915/i915_gem_evict.c +1 −1
@@ -301,7 +301,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 			break;
 		}
 
-		GEM_BUG_ON(!node->allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(node));
 		vma = container_of(node, typeof(*vma), node);
 
 		/* If we are using coloring to insert guard pages between