Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2999-drm-amdgpu-Add-navi10-kfd-support-for-amdgpu-v3.patch')
-rw-r--r-- | meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2999-drm-amdgpu-Add-navi10-kfd-support-for-amdgpu-v3.patch | 1076
1 file changed, 1076 insertions(+), 0 deletions(-)
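For orientation before the diff: the probe path this patch extends picks a per-ASIC kfd2kgd function table at device-probe time, and the patch adds a CHIP_NAVI10 arm that returns the new gfx v10 table. The sketch below is a minimal stand-alone restatement of that dispatch pattern, not code from the tree; the enum values and the reduced kfd2kgd_calls struct are hypothetical stand-ins for the real amdgpu definitions.

/* Minimal userspace sketch of the per-ASIC function-table dispatch
 * used by amdgpu_amdkfd_device_probe(). All names are illustrative
 * stand-ins, not the real amdgpu definitions. */
#include <stdio.h>

enum chip_class { CHIP_VEGA10, CHIP_ARCTURUS, CHIP_NAVI10, CHIP_UNKNOWN };

struct kfd2kgd_calls {
	const char *name;	/* stands in for the real table of hooks */
};

static struct kfd2kgd_calls gfx_v9_calls   = { "gfx_v9" };
static struct kfd2kgd_calls arcturus_calls = { "arcturus" };
static struct kfd2kgd_calls gfx_v10_calls  = { "gfx_v10" };

static struct kfd2kgd_calls *probe_kfd2kgd(enum chip_class chip)
{
	switch (chip) {
	case CHIP_VEGA10:
		return &gfx_v9_calls;
	case CHIP_ARCTURUS:
		return &arcturus_calls;
	case CHIP_NAVI10:	/* the case this patch adds */
		return &gfx_v10_calls;
	default:
		return NULL;	/* "kfd not supported on this ASIC" */
	}
}

int main(void)
{
	struct kfd2kgd_calls *f = probe_kfd2kgd(CHIP_NAVI10);

	printf("selected table: %s\n", f ? f->name : "(none)");
	return 0;
}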
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2999-drm-amdgpu-Add-navi10-kfd-support-for-amdgpu-v3.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2999-drm-amdgpu-Add-navi10-kfd-support-for-amdgpu-v3.patch
new file mode 100644
index 00000000..9fd5c79a
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2999-drm-amdgpu-Add-navi10-kfd-support-for-amdgpu-v3.patch
@@ -0,0 +1,1076 @@
+From b6cb691101faba6dcc77c6f395a0bfd5c55f23fc Mon Sep 17 00:00:00 2001
+From: Hawking Zhang <Hawking.Zhang@amd.com>
+Date: Tue, 5 Mar 2019 19:59:30 +0800
+Subject: [PATCH 2999/4256] drm/amdgpu: Add navi10 kfd support for amdgpu (v3)
+
+KFD (Kernel Fusion Driver) is the compute backend driver
+for AMD GPUs.
+
+v2: squash in updates (Alex)
+v3: fix warnings (Alex)
+
+Change-Id: I173523953bc281a7a4df9ebf87d865f3bbbc32c4
+Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
+Signed-off-by: Philip Cox <Philip.Cox@amd.com>
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile        |   3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c |  16 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h |   1 +
+ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 975 ++++++++++++++++++
+ 4 files changed, 991 insertions(+), 4 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 35998d3b60eb..bb2e00bbdee1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -169,7 +169,8 @@ amdgpu-y += \
+ 	amdgpu_amdkfd_gpuvm.o \
+ 	amdgpu_amdkfd_gfx_v8.o \
+ 	amdgpu_amdkfd_gfx_v9.o \
+-	amdgpu_amdkfd_arcturus.o
++	amdgpu_amdkfd_arcturus.o \
++	amdgpu_amdkfd_gfx_v10.o
+ 
+ ifneq ($(CONFIG_DRM_AMDGPU_CIK),)
+ amdgpu-y += amdgpu_amdkfd_gfx_v7.o
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 78b78f898de1..859763c7f419 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -96,6 +96,8 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
+ 		break;
+ 	case CHIP_ARCTURUS:
+ 		kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
++	case CHIP_NAVI10:
++		kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
+ 		break;
+ 	default:
+ 		dev_info(adev->dev, "kfd not supported on this ASIC\n");
+@@ -457,9 +459,12 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+ 
+ 	if (amdgpu_sriov_vf(adev))
+ 		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
+-	else if (adev->powerplay.pp_funcs)
+-		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
+-	else
++	else if (adev->powerplay.pp_funcs) {
++		if (amdgpu_emu_mode == 1)
++			mem_info->mem_clk_max = 0;
++		else
++			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
++	} else
+ 		mem_info->mem_clk_max = 100;
+ }
+ 
+@@ -739,6 +744,11 @@ struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
+ 	return NULL;
+ }
+ 
++struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
++{
++	return NULL;
++}
++
+ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+ 			      const struct kfd2kgd_calls *f2g)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index cec816566833..a344f37e48c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -142,6 +142,7 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
+ struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void);
++struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void);
+ int amdgpu_amdkfd_copy_mem_to_mem(struct kgd_dev *kgd, struct kgd_mem *src_mem,
+ 				  uint64_t src_offset, struct kgd_mem *dst_mem,
+ 				  uint64_t dest_offset, uint64_t size, struct dma_fence **f,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+new file mode 100644
+index 000000000000..39ffb078beb4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+@@ -0,0 +1,975 @@
++/*
++ * Copyright 2019 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++#undef pr_fmt
++#define pr_fmt(fmt) "kfd2kgd: " fmt
++
++#include <linux/module.h>
++#include <linux/fdtable.h>
++#include <linux/uaccess.h>
++#include <linux/firmware.h>
++#include <linux/mmu_context.h>
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_amdkfd.h"
++#include "amdgpu_ucode.h"
++#include "soc15_hw_ip.h"
++#include "gc/gc_10_1_0_offset.h"
++#include "gc/gc_10_1_0_sh_mask.h"
++#include "navi10_enum.h"
++#include "athub/athub_2_0_0_offset.h"
++#include "athub/athub_2_0_0_sh_mask.h"
++#include "oss/osssys_5_0_0_offset.h"
++#include "oss/osssys_5_0_0_sh_mask.h"
++#include "soc15_common.h"
++#include "v10_structs.h"
++#include "nv.h"
++#include "nvd.h"
++
++enum hqd_dequeue_request_type {
++	NO_ACTION = 0,
++	DRAIN_PIPE,
++	RESET_WAVES,
++	SAVE_WAVES
++};
++
++/*
++ * Register access functions
++ */
++
++static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
++		uint32_t sh_mem_config,
++		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
++		uint32_t sh_mem_bases);
++static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
++		unsigned int vmid);
++static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
++static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
++			uint32_t queue_id, uint32_t __user *wptr,
++			uint32_t wptr_shift, uint32_t wptr_mask,
++			struct mm_struct *mm);
++static int kgd_hqd_dump(struct kgd_dev *kgd,
++			uint32_t pipe_id, uint32_t queue_id,
++			uint32_t (**dump)[2], uint32_t *n_regs);
++static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
++			     uint32_t __user *wptr, struct mm_struct *mm);
++static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
++			     uint32_t engine_id, uint32_t queue_id,
++			     uint32_t (**dump)[2], uint32_t *n_regs);
++static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
++		uint32_t pipe_id, uint32_t queue_id);
++static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
++				enum kfd_preempt_type reset_type,
++				unsigned int utimeout, uint32_t pipe_id,
++				uint32_t queue_id);
++static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
++				unsigned int utimeout);
++#if 0
++static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
++#endif
++static int kgd_address_watch_disable(struct kgd_dev *kgd);
++static int kgd_address_watch_execute(struct kgd_dev *kgd,
++					unsigned int watch_point_id,
++					uint32_t cntl_val,
++					uint32_t addr_hi,
++					uint32_t addr_lo);
++static int kgd_wave_control_execute(struct kgd_dev *kgd,
++					uint32_t gfx_index_val,
++					uint32_t sq_cmd);
++static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
++					unsigned int watch_point_id,
++					unsigned int reg_offset);
++
++static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
++		uint8_t vmid);
++static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
++		uint8_t vmid);
++static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
++		uint64_t page_table_base);
++static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
++static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
++
++/* Because of REG_GET_FIELD() being used, we put this function in the
++ * asic specific file.
++ */
++static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
++		struct tile_config *config)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
++
++	config->gb_addr_config = adev->gfx.config.gb_addr_config;
++#if 0
++/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
++ * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
++ * changes commented out related code, doing the same here for now but
++ * need to sync with Ken et al
++ */
++	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
++				MC_ARB_RAMCFG, NOOFBANK);
++	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
++				MC_ARB_RAMCFG, NOOFRANKS);
++#endif
++
++	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
++	config->num_tile_configs =
++			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
++	config->macro_tile_config_ptr =
++			adev->gfx.config.macrotile_mode_array;
++	config->num_macro_tile_configs =
++			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
++
++	return 0;
++}
++
++static const struct kfd2kgd_calls kfd2kgd = {
++	.program_sh_mem_settings = kgd_program_sh_mem_settings,
++	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
++	.init_interrupts = kgd_init_interrupts,
++	.hqd_load = kgd_hqd_load,
++	.hqd_sdma_load = kgd_hqd_sdma_load,
++	.hqd_dump = kgd_hqd_dump,
++	.hqd_sdma_dump = kgd_hqd_sdma_dump,
++	.hqd_is_occupied = kgd_hqd_is_occupied,
++	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
++	.hqd_destroy = kgd_hqd_destroy,
++	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
++	.address_watch_disable = kgd_address_watch_disable,
++	.address_watch_execute = kgd_address_watch_execute,
++	.wave_control_execute = kgd_wave_control_execute,
++	.address_watch_get_offset = kgd_address_watch_get_offset,
++	.get_atc_vmid_pasid_mapping_pasid =
++			get_atc_vmid_pasid_mapping_pasid,
++	.get_atc_vmid_pasid_mapping_valid =
++			get_atc_vmid_pasid_mapping_valid,
++	.invalidate_tlbs = invalidate_tlbs,
++	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
++	.set_vm_context_page_table_base = set_vm_context_page_table_base,
++	.get_tile_config = amdgpu_amdkfd_get_tile_config,
++};
++
++struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
++{
++	return (struct kfd2kgd_calls *)&kfd2kgd;
++}
++
++static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
++{
++	return (struct amdgpu_device *)kgd;
++}
++
++static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
++			uint32_t queue, uint32_t vmid)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++
++	mutex_lock(&adev->srbm_mutex);
++	nv_grbm_select(adev, mec, pipe, queue, vmid);
++}
++
++static void unlock_srbm(struct kgd_dev *kgd)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++
++	nv_grbm_select(adev, 0, 0, 0, 0);
++	mutex_unlock(&adev->srbm_mutex);
++}
++
++static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
++			uint32_t queue_id)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++
++	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
++	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
++
++	lock_srbm(kgd, mec, pipe, queue_id, 0);
++}
++
++static uint32_t get_queue_mask(struct amdgpu_device *adev,
++			       uint32_t pipe_id, uint32_t queue_id)
++{
++	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
++			    queue_id) & 31;
++
++	return ((uint32_t)1) << bit;
++}
++
++static void release_queue(struct kgd_dev *kgd)
++{
++	unlock_srbm(kgd);
++}
++
++static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
++					uint32_t sh_mem_config,
++					uint32_t sh_mem_ape1_base,
++					uint32_t sh_mem_ape1_limit,
++					uint32_t sh_mem_bases)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++
++	lock_srbm(kgd, 0, 0, 0, vmid);
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
++	/* APE1 no longer exists on GFX9 */
++
++	unlock_srbm(kgd);
++}
++
++static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
++					unsigned int vmid)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++
++	/*
++	 * We have to assume that there is no outstanding mapping.
++	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
++	 * a mapping is in progress or because a mapping finished
++	 * and the SW cleared it.
++	 * So the protocol is to always wait & clear.
++	 */
++	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
++			ATC_VMID0_PASID_MAPPING__VALID_MASK;
++
++	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
++	/*
++	 * need to do this twice, once for gfx and once for mmhub
++	 * for ATC add 16 to VMID for mmhub, for IH different registers.
++	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
++	 */
++
++	pr_debug("ATHUB, reg %x\n",SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
++	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
++	       pasid_mapping);
++
++#if 0
++	/* TODO: uncomment this code when the hardware support is ready. */
++	while (!(RREG32(SOC15_REG_OFFSET(
++			ATHUB, 0,
++			mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
++		 (1U << vmid)))
++		cpu_relax();
++
++	pr_debug("ATHUB mapping update finished\n");
++	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
++				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
++	       1U << vmid);
++#endif
++
++	/* Mapping vmid to pasid also for IH block */
++	pr_debug("update mapping for IH block and mmhub");
++	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
++	       pasid_mapping);
++
++	return 0;
++}
++
++/* TODO - RING0 form of field is obsolete, seems to date back to SI
++ * but still works
++ */
++
++static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	uint32_t mec;
++	uint32_t pipe;
++
++	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
++	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
++
++	lock_srbm(kgd, mec, pipe, 0, 0);
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
++		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
++		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
++
++	unlock_srbm(kgd);
++
++	return 0;
++}
++
++static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
++				unsigned int engine_id,
++				unsigned int queue_id)
++{
++	uint32_t base[2] = {
++		SOC15_REG_OFFSET(SDMA0, 0,
++				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
++		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
++		 * on SDMA1 base address (dw 0x1860) but based on SDMA0
++		 * base address (dw 0x1260). Therefore use mmSDMA0_RLC0_RB_CNTL
++		 * instead of mmSDMA1_RLC0_RB_CNTL for the base address calc
++		 * below
++		 */
++		SOC15_REG_OFFSET(SDMA1, 0,
++				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
++	};
++	uint32_t retval;
++
++	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
++					       mmSDMA0_RLC0_RB_CNTL);
++
++	pr_debug("sdma base address: 0x%x\n", retval);
++
++	return retval;
++}
++
++#if 0
++static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
++{
++	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
++			mmTCP_WATCH0_ADDR_H;
++
++	pr_debug("kfd: reg watch base address: 0x%x\n", retval);
++
++	return retval;
++}
++#endif
++
++static inline struct v10_compute_mqd *get_mqd(void *mqd)
++{
++	return (struct v10_compute_mqd *)mqd;
++}
++
++static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
++{
++	return (struct v10_sdma_mqd *)mqd;
++}
++
++static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
++			uint32_t queue_id, uint32_t __user *wptr,
++			uint32_t wptr_shift, uint32_t wptr_mask,
++			struct mm_struct *mm)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	struct v10_compute_mqd *m;
++	uint32_t *mqd_hqd;
++	uint32_t reg, hqd_base, data;
++
++	m = get_mqd(mqd);
++
++	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
++	acquire_queue(kgd, pipe_id, queue_id);
++
++	/* HIQ is set during driver init period with vmid set to 0*/
++	if (m->cp_hqd_vmid == 0) {
++		uint32_t value, mec, pipe;
++
++		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
++		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
++
++		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
++			mec, pipe, queue_id);
++		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
++		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
++			((mec << 5) | (pipe << 3) | queue_id | 0x80));
++		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
++	}
++
++	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
++	mqd_hqd = &m->cp_mqd_base_addr_lo;
++	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
++
++	for (reg = hqd_base;
++	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
++		WREG32(reg, mqd_hqd[reg - hqd_base]);
++
++
++	/* Activate doorbell logic before triggering WPTR poll. */
++	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
++			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
++
++	if (wptr) {
++		/* Don't read wptr with get_user because the user
++		 * context may not be accessible (if this function
++		 * runs in a work queue). Instead trigger a one-shot
++		 * polling read from memory in the CP. This assumes
++		 * that wptr is GPU-accessible in the queue's VMID via
++		 * ATC or SVM. WPTR==RPTR before starting the poll so
++		 * the CP starts fetching new commands from the right
++		 * place.
++		 *
++		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
++		 * tricky. Assume that the queue didn't overflow. The
++		 * number of valid bits in the 32-bit RPTR depends on
++		 * the queue size. The remaining bits are taken from
++		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
++		 * queue size.
++		 */
++		uint32_t queue_size =
++			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
++					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
++		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
++
++		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
++			guessed_wptr += queue_size;
++		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
++		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
++
++		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
++		       lower_32_bits(guessed_wptr));
++		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
++		       upper_32_bits(guessed_wptr));
++		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
++		       lower_32_bits((uint64_t)wptr));
++		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
++		       upper_32_bits((uint64_t)wptr));
++		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id));
++		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
++		       get_queue_mask(adev, pipe_id, queue_id));
++	}
++
++	/* Start the EOP fetcher */
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
++	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
++			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
++
++	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
++
++	release_queue(kgd);
++
++	return 0;
++}
++
++static int kgd_hqd_dump(struct kgd_dev *kgd,
++			uint32_t pipe_id, uint32_t queue_id,
++			uint32_t (**dump)[2], uint32_t *n_regs)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	uint32_t i = 0, reg;
++#define HQD_N_REGS 56
++#define DUMP_REG(addr) do {				\
++		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
++			break;				\
++		(*dump)[i][0] = (addr) << 2;		\
++		(*dump)[i++][1] = RREG32(addr);		\
++	} while (0)
++
++	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
++	if (*dump == NULL)
++		return -ENOMEM;
++
++	acquire_queue(kgd, pipe_id, queue_id);
++
++	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
++	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
++		DUMP_REG(reg);
++
++	release_queue(kgd);
++
++	WARN_ON_ONCE(i != HQD_N_REGS);
++	*n_regs = i;
++
++	return 0;
++}
++
++static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
++			     uint32_t __user *wptr, struct mm_struct *mm)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	struct v10_sdma_mqd *m;
++	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
++	unsigned long end_jiffies;
++	uint32_t data;
++	uint64_t data64;
++	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
++
++	m = get_sdma_mqd(mqd);
++	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
++					    m->sdma_queue_id);
++	pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
++	sdmax_gfx_context_cntl = m->sdma_engine_id ?
++		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
++		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
++
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
++		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
++
++	end_jiffies = msecs_to_jiffies(2000) + jiffies;
++	while (true) {
++		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
++		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
++			break;
++		if (time_after(jiffies, end_jiffies))
++			return -ETIME;
++		usleep_range(500, 1000);
++	}
++	data = RREG32(sdmax_gfx_context_cntl);
++	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
++			     RESUME_CTX, 0);
++	WREG32(sdmax_gfx_context_cntl, data);
++
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
++	       m->sdmax_rlcx_doorbell_offset);
++
++	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
++			     ENABLE, 1);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
++	       m->sdmax_rlcx_rb_rptr_hi);
++
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
++	if (read_user_wptr(mm, wptr64, data64)) {
++		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
++		       lower_32_bits(data64));
++		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
++		       upper_32_bits(data64));
++	} else {
++		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
++		       m->sdmax_rlcx_rb_rptr);
++		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
++		       m->sdmax_rlcx_rb_rptr_hi);
++	}
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
++
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
++	       m->sdmax_rlcx_rb_base_hi);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
++	       m->sdmax_rlcx_rb_rptr_addr_lo);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
++	       m->sdmax_rlcx_rb_rptr_addr_hi);
++
++	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
++			     RB_ENABLE, 1);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
++
++	return 0;
++}
++
++static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
++			     uint32_t engine_id, uint32_t queue_id,
++			     uint32_t (**dump)[2], uint32_t *n_regs)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
++	uint32_t i = 0, reg;
++#undef HQD_N_REGS
++#define HQD_N_REGS (19+6+7+10)
++
++	pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
++	pr_debug("sdma base addr %x\n", sdma_base_addr);
++
++	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
++	if (*dump == NULL)
++		return -ENOMEM;
++
++	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
++		DUMP_REG(sdma_base_addr + reg);
++	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
++		DUMP_REG(sdma_base_addr + reg);
++	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
++	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
++		DUMP_REG(sdma_base_addr + reg);
++	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
++	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
++		DUMP_REG(sdma_base_addr + reg);
++
++	WARN_ON_ONCE(i != HQD_N_REGS);
++	*n_regs = i;
++
++	return 0;
++}
++
++static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
++				uint32_t pipe_id, uint32_t queue_id)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	uint32_t act;
++	bool retval = false;
++	uint32_t low, high;
++
++	acquire_queue(kgd, pipe_id, queue_id);
++	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
++	if (act) {
++		low = lower_32_bits(queue_address >> 8);
++		high = upper_32_bits(queue_address >> 8);
++
++		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
++		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
++			retval = true;
++	}
++	release_queue(kgd);
++	return retval;
++}
++
++static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	struct v10_sdma_mqd *m;
++	uint32_t sdma_base_addr;
++	uint32_t sdma_rlc_rb_cntl;
++
++	m = get_sdma_mqd(mqd);
++	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
++					    m->sdma_queue_id);
++
++	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
++
++	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
++		return true;
++
++	return false;
++}
++
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
++				enum kfd_preempt_type reset_type,
++				unsigned int utimeout, uint32_t pipe_id,
++				uint32_t queue_id)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	enum hqd_dequeue_request_type type;
++	unsigned long end_jiffies;
++	uint32_t temp;
++	struct v10_compute_mqd *m = get_mqd(mqd);
++
++#if 0
++	unsigned long flags;
++	int retry;
++#endif
++
++	acquire_queue(kgd, pipe_id, queue_id);
++
++	if (m->cp_hqd_vmid == 0)
++		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
++
++	switch (reset_type) {
++	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
++		type = DRAIN_PIPE;
++		break;
++	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
++		type = RESET_WAVES;
++		break;
++	default:
++		type = DRAIN_PIPE;
++		break;
++	}
++
++#if 0 /* Is this still needed? */
++	/* Workaround: If IQ timer is active and the wait time is close to or
++	 * equal to 0, dequeueing is not safe. Wait until either the wait time
++	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
++	 * cleared before continuing. Also, ensure wait times are set to at
++	 * least 0x3.
++	 */
++	local_irq_save(flags);
++	preempt_disable();
++	retry = 5000; /* wait for 500 usecs at maximum */
++	while (true) {
++		temp = RREG32(mmCP_HQD_IQ_TIMER);
++		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
++			pr_debug("HW is processing IQ\n");
++			goto loop;
++		}
++		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
++			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
++					== 3) /* SEM-rearm is safe */
++				break;
++			/* Wait time 3 is safe for CP, but our MMIO read/write
++			 * time is close to 1 microsecond, so check for 10 to
++			 * leave more buffer room
++			 */
++			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
++					>= 10)
++				break;
++			pr_debug("IQ timer is active\n");
++		} else
++			break;
++loop:
++		if (!retry) {
++			pr_err("CP HQD IQ timer status time out\n");
++			break;
++		}
++		ndelay(100);
++		--retry;
++	}
++	retry = 1000;
++	while (true) {
++		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
++		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
++			break;
++		pr_debug("Dequeue request is pending\n");
++
++		if (!retry) {
++			pr_err("CP HQD dequeue request time out\n");
++			break;
++		}
++		ndelay(100);
++		--retry;
++	}
++	local_irq_restore(flags);
++	preempt_enable();
++#endif
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
++
++	end_jiffies = (utimeout * HZ / 1000) + jiffies;
++	while (true) {
++		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
++		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
++			break;
++		if (time_after(jiffies, end_jiffies)) {
++			pr_err("cp queue preemption time out.\n");
++			release_queue(kgd);
++			return -ETIME;
++		}
++		usleep_range(500, 1000);
++	}
++
++	release_queue(kgd);
++	return 0;
++}
++
++static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
++				unsigned int utimeout)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	struct v10_sdma_mqd *m;
++	uint32_t sdma_base_addr;
++	uint32_t temp;
++	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
++
++	m = get_sdma_mqd(mqd);
++	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
++					    m->sdma_queue_id);
++
++	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
++	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
++
++	while (true) {
++		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
++		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
++			break;
++		if (time_after(jiffies, end_jiffies))
++			return -ETIME;
++		usleep_range(500, 1000);
++	}
++
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
++	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
++		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
++		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
++
++	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
++	m->sdmax_rlcx_rb_rptr_hi =
++		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
++
++	return 0;
++}
++
++static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
++							uint8_t vmid)
++{
++	uint32_t reg;
++	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
++
++	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
++		     + vmid);
++	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
++}
++
++static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
++								uint8_t vmid)
++{
++	uint32_t reg;
++	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
++
++	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
++		     + vmid);
++	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
++}
++
++static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
++	uint32_t req = (1 << vmid) |
++		(0 << GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) |/* legacy */
++		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK |
++		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK |
++		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK |
++		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK |
++		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;
++
++	mutex_lock(&adev->srbm_mutex);
++
++	/* Use light weight invalidation.
++	 *
++	 * TODO 1: agree on the right set of invalidation registers for
++	 * KFD use. Use the last one for now. Invalidate only GCHUB as
++	 * SDMA is now moved to GCHUB
++	 *
++	 * TODO 2: support range-based invalidation, requires kfg2kgd
++	 * interface change
++	 */
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32),
++			0xffffffff);
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32),
++			0x0000001f);
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ), req);
++
++	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK)) &
++		 (1 << vmid)))
++		cpu_relax();
++
++	mutex_unlock(&adev->srbm_mutex);
++}
++
++static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
++{
++	signed long r;
++	uint32_t seq;
++	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
++
++	spin_lock(&adev->gfx.kiq.ring_lock);
++	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
++	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
++	amdgpu_ring_write(ring,
++			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
++			PACKET3_INVALIDATE_TLBS_PASID(pasid));
++	amdgpu_fence_emit_polling(ring, &seq);
++	amdgpu_ring_commit(ring);
++	spin_unlock(&adev->gfx.kiq.ring_lock);
++
++	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
++	if (r < 1) {
++		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
++		return -ETIME;
++	}
++
++	return 0;
++}
++
++static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
++	int vmid;
++	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
++
++	if (amdgpu_emu_mode == 0 && ring->sched.ready)
++		return invalidate_tlbs_with_kiq(adev, pasid);
++
++	for (vmid = 0; vmid < 16; vmid++) {
++		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
++			continue;
++		if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
++			if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
++				== pasid) {
++				write_vmid_invalidate_request(kgd, vmid);
++				break;
++			}
++		}
++	}
++
++	return 0;
++}
++
++static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
++
++	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
++		pr_err("non kfd vmid %d\n", vmid);
++		return 0;
++	}
++
++	write_vmid_invalidate_request(kgd, vmid);
++	return 0;
++}
++
++static int kgd_address_watch_disable(struct kgd_dev *kgd)
++{
++	return 0;
++}
++
++static int kgd_address_watch_execute(struct kgd_dev *kgd,
++					unsigned int watch_point_id,
++					uint32_t cntl_val,
++					uint32_t addr_hi,
++					uint32_t addr_lo)
++{
++	return 0;
++}
++
++static int kgd_wave_control_execute(struct kgd_dev *kgd,
++					uint32_t gfx_index_val,
++					uint32_t sq_cmd)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	uint32_t data = 0;
++
++	mutex_lock(&adev->grbm_idx_mutex);
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
++
++	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
++		INSTANCE_BROADCAST_WRITES, 1);
++	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
++		SA_BROADCAST_WRITES, 1);
++	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
++		SE_BROADCAST_WRITES, 1);
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
++	mutex_unlock(&adev->grbm_idx_mutex);
++
++	return 0;
++}
++
++static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
++					unsigned int watch_point_id,
++					unsigned int reg_offset)
++{
++	return 0;
++}
++
++static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
++		uint64_t page_table_base)
++{
++	struct amdgpu_device *adev = get_amdgpu_device(kgd);
++	uint64_t base = page_table_base | AMDGPU_PTE_VALID;
++
++	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
++		pr_err("trying to set page table base for wrong VMID %u\n",
++		       vmid);
++		return;
++	}
++
++	/* TODO: take advantage of per-process address space size. For
++	 * now, all processes share the same address space size, like
++	 * on GFX8 and older.
++	 */
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
++			lower_32_bits(adev->vm_manager.max_pfn - 1));
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
++			upper_32_bits(adev->vm_manager.max_pfn - 1));
++
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
++	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
++}
+-- 
+2.17.1
+
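The most subtle piece of the new file is the write-pointer reconstruction in kgd_hqd_load(): the MQD saves a full 64-bit WPTR but only a 32-bit RPTR, so the driver guesses the 64-bit WPTR by taking the low bits from the RPTR and the high bits from the saved WPTR, adding one queue size if the ring wrapped in between. Below is a stand-alone userspace restatement of that logic; the sample field values are hypothetical (the real code reads them from the v10_compute_mqd).

/* Stand-alone restatement of the 64-bit WPTR guess from kgd_hqd_load().
 * Field values are hypothetical; the real code reads them from the MQD. */
#include <stdio.h>
#include <stdint.h>

static uint64_t guess_wptr(uint32_t rptr, uint32_t wptr_lo, uint32_t wptr_hi,
			   uint32_t queue_size /* ring size in dwords, power of 2 */)
{
	/* Low bits come from the 32-bit RPTR (WPTR==RPTR before the poll). */
	uint64_t guessed = rptr & (queue_size - 1);

	/* If the saved WPTR's low bits are behind the RPTR's, the ring
	 * wrapped since the save: advance by one full queue size. */
	if ((wptr_lo & (queue_size - 1)) < guessed)
		guessed += queue_size;

	/* The remaining bits come from the saved 64-bit WPTR. */
	guessed += wptr_lo & ~(uint64_t)(queue_size - 1);
	guessed += (uint64_t)wptr_hi << 32;
	return guessed;
}

int main(void)
{
	/* 1024-dword ring; the saved WPTR's low bits (0x380) trail the
	 * RPTR's (0x3f0), so the guess lands one queue size further
	 * along: 0x3f0 + 0x400 + 0x1000 = 0x17f0. */
	printf("guessed wptr = 0x%llx\n",
	       (unsigned long long)guess_wptr(0x3f0, 0x1380, 0, 1024));
	return 0;
}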