Commit be452c4e authored by Daniel Vetter's avatar Daniel Vetter
Browse files

Merge tag 'drm-next-5.6-2019-12-11' of git://people.freedesktop.org/~agd5f/linux into drm-next

drm-next-5.6-2019-12-11:

amdgpu:
- Add MST atomic routines
- Add support for DMCUB (new helper microengine for displays)
- Add OEM i2c support in DC
- Use vstartup for vblank events on DCN
- Simplify Kconfig for DC
- Renoir fixes for DC
- Clean up function pointers in DC
- Initial support for HDCP 2.x
- Misc code cleanups
- GFX10 fixes
- Rework JPEG engine handling for VCN
- Add clock and power gating support for JPEG
- BACO support for Arcturus
- Cleanup PSP ring handling
- Add framework for using BACO with runtime pm to save power
- Move core pci state handling out of the driver for pm ops
- Allow guest power control in 1 VF case with SR-IOV
- SR-IOV fixes
- RAS fixes
- Support for power metrics on Renoir
- Golden settings updates for gfx10
- Enable gfxoff on supported Navi10 SKUs
- Update MAINTAINERS

amdkfd:
- Clean up generational gfx code
- Fixes for gfx10
- DIQ fixes
- Share more code with amdgpu

radeon:
- PPC DMA fix
- Register checker fixes for r1xx/r2xx
- Misc cleanups

From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191211223020.7510-1-alexander.deucher@amd.com
parents d2e53228 ad808910
Loading
Loading
Loading
Loading
+6 −2
Original line number Original line Diff line number Diff line
@@ -147,12 +147,16 @@ amdgpu-y += \
	vce_v3_0.o \
	vce_v3_0.o \
	vce_v4_0.o
	vce_v4_0.o


# add VCN block
# add VCN and JPEG block
amdgpu-y += \
amdgpu-y += \
	amdgpu_vcn.o \
	amdgpu_vcn.o \
	vcn_v1_0.o \
	vcn_v1_0.o \
	vcn_v2_0.o \
	vcn_v2_0.o \
	vcn_v2_5.o
	vcn_v2_5.o \
	amdgpu_jpeg.o \
	jpeg_v1_0.o \
	jpeg_v2_0.o \
	jpeg_v2_5.o


# add ATHUB block
# add ATHUB block
amdgpu-y += \
amdgpu-y += \
+22 −3
Original line number Original line Diff line number Diff line
@@ -69,6 +69,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_mn.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_gfx.h"
@@ -588,6 +589,8 @@ struct amdgpu_asic_funcs {
	bool (*need_reset_on_init)(struct amdgpu_device *adev);
	bool (*need_reset_on_init)(struct amdgpu_device *adev);
	/* PCIe replay counter */
	/* PCIe replay counter */
	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
	/* device supports BACO */
	bool (*supports_baco)(struct amdgpu_device *adev);
};
};


/*
/*
@@ -704,6 +707,7 @@ enum amd_hw_ip_block_type {
	MP1_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	JPEG_HWIP = VCN_HWIP,
	VCE_HWIP,
	VCE_HWIP,
	DF_HWIP,
	DF_HWIP,
	DCE_HWIP,
	DCE_HWIP,
@@ -899,6 +903,9 @@ struct amdgpu_device {
	/* vcn */
	/* vcn */
	struct amdgpu_vcn		vcn;
	struct amdgpu_vcn		vcn;


	/* jpeg */
	struct amdgpu_jpeg		jpeg;

	/* firmwares */
	/* firmwares */
	struct amdgpu_firmware		firmware;
	struct amdgpu_firmware		firmware;


@@ -982,6 +989,13 @@ struct amdgpu_device {


	/* device pstate */
	/* device pstate */
	int				pstate;
	int				pstate;
	/* enable runtime pm on the device */
	bool                            runpm;

	bool                            pm_sysfs_en;
	bool                            ucode_sysfs_en;

	bool				in_baco;
};
};


static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1117,6 +1131,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))

#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));


/* Common functions */
/* Common functions */
@@ -1133,9 +1149,12 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 *registers,
					     const u32 array_size);
					     const u32 array_size);


bool amdgpu_device_is_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev);
				      struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
int amdgpu_device_baco_exit(struct drm_device *dev);


/* atpx handler */
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -1173,8 +1192,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
				 struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+2 −4
Original line number Original line Diff line number Diff line
@@ -40,7 +40,7 @@
#include "soc15d.h"
#include "soc15d.h"
#include "mmhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_0.h"
#include "gfxhub_v1_0.h"
#include "gmc_v9_0.h"
#include "mmhub_v9_4.h"




enum hqd_dequeue_request_type {
enum hqd_dequeue_request_type {
@@ -774,9 +774,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
	 * on GFX8 and older.
	 * on GFX8 and older.
	 */
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
	if (adev->asic_type == CHIP_ARCTURUS) {
		/* Two MMHUBs */
		mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
		mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base);
		mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base);
	} else
	} else
		mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
		mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);


+8 −8
Original line number Original line Diff line number Diff line
@@ -85,7 +85,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
}


/* Set memory usage limits. Current, limits are
/* Set memory usage limits. Current, limits are
 *  System (TTM + userptr) memory - 3/4th System RAM
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 *  TTM memory - 3/8th System RAM
 */
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
@@ -98,7 +98,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
	mem *= si.mem_unit;
	mem *= si.mem_unit;


	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_system_mem_limit >> 20),
@@ -358,7 +358,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
	if (ret)
	if (ret)
		return ret;
		return ret;


	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
	return amdgpu_sync_fence(sync, vm->last_update, false);
}
}


static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
@@ -750,7 +750,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,


	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);


	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
	amdgpu_sync_fence(sync, bo_va->last_pt_update, false);


	return 0;
	return 0;
}
}
@@ -769,7 +769,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
		return ret;
		return ret;
	}
	}


	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
	return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
}
}


static int map_bo_to_gpuvm(struct amdgpu_device *adev,
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -1674,10 +1674,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
				struct mm_struct *mm)
{
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdkfd_process_info *process_info = mem->process_info;
	int invalid, evicted_bos;
	int evicted_bos;
	int r = 0;
	int r = 0;


	invalid = atomic_inc_return(&mem->invalid);
	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		/* First eviction, stop the queues */
@@ -2048,7 +2048,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
			goto validate_map_fail;
		}
		}
		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
		if (ret) {
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
			goto validate_map_fail;
+2 −18
Original line number Original line Diff line number Diff line
@@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
		path_size += le16_to_cpu(path->usSize);
		path_size += le16_to_cpu(path->usSize);


		if (device_support & le16_to_cpu(path->usDeviceTag)) {
		if (device_support & le16_to_cpu(path->usDeviceTag)) {
			uint8_t con_obj_id, con_obj_num, con_obj_type;
			uint8_t con_obj_id =

			con_obj_id =
			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
			    >> OBJECT_ID_SHIFT;
			    >> OBJECT_ID_SHIFT;
			con_obj_num =
			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
			    >> ENUM_ID_SHIFT;
			con_obj_type =
			    (le16_to_cpu(path->usConnObjectId) &
			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;


			/* Skip TV/CV support */
			/* Skip TV/CV support */
			if ((le16_to_cpu(path->usDeviceTag) ==
			if ((le16_to_cpu(path->usDeviceTag) ==
@@ -373,15 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
			router.ddc_valid = false;
			router.ddc_valid = false;
			router.cd_valid = false;
			router.cd_valid = false;
			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
				uint8_t grph_obj_type =

				grph_obj_id =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
				grph_obj_num =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
				grph_obj_type =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;


Loading