Diffstat (limited to 'meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0133-drm-amdkfd-Add-wave-control-operation-to-debugger.patch')
-rw-r--r--  meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0133-drm-amdkfd-Add-wave-control-operation-to-debugger.patch  535
1 file changed, 535 insertions, 0 deletions
diff --git a/meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0133-drm-amdkfd-Add-wave-control-operation-to-debugger.patch b/meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0133-drm-amdkfd-Add-wave-control-operation-to-debugger.patch
new file mode 100644
index 00000000..9df04ed5
--- /dev/null
+++ b/meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0133-drm-amdkfd-Add-wave-control-operation-to-debugger.patch
@@ -0,0 +1,535 @@
+From 788bf83db3017f083ebb84c93f101607c769c3a7 Mon Sep 17 00:00:00 2001
+From: Yair Shachar <yair.shachar@amd.com>
+Date: Wed, 20 May 2015 13:58:12 +0300
+Subject: [PATCH 0133/1050] drm/amdkfd: Add wave control operation to debugger
+
+The wave control operation supports several command types executed upon
+existing wave fronts that belong to the currently debugged process.
+
+The available commands are:
+
+HALT - Freeze wave front(s) execution
+RESUME - Resume frozen wave front(s) execution
+KILL - Kill existing wave front(s)
+
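+As a usage sketch (illustrative only: "dbgmgr", "p" and "status" are
+hypothetical locals, while the other identifiers are added by this
+patch), a caller fills a dbg_wave_control_info and hands it to the
+debug manager:
+
+    struct dbg_wave_control_info wac_info = {0};
+
+    wac_info.process = p;      /* the registered debugged process */
+    wac_info.mode    = HSA_DBG_WAVEMODE_BROADCAST_PROCESS;
+    wac_info.operand = HSA_DBG_WAVEOP_HALT;
+    wac_info.trapId  = 0;      /* only meaningful for HSA_DBG_WAVEOP_TRAP */
+
+    /* dispatches to dbgdev_wave_control_diq() or _nodiq() */
+    status = kfd_dbgmgr_wave_control(dbgmgr, &wac_info);
+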
+Signed-off-by: Yair Shachar <yair.shachar@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 405 +++++++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 16 +
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h | 5 +-
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +
+ 5 files changed, 430 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index eed4a83..ee33b86 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -47,6 +47,125 @@ static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
+ dev->kfd2kgd->address_watch_disable(dev->kgd);
+ }
+
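++/*
++ * dbgdev_diq_submit_ib - submit a caller-built indirect buffer (IB)
++ * through the DIQ and wait for the CP to consume it.
++ *
++ * @pasid: PASID of the debugged process
++ * @vmid0_address: GPU VA of the IB contents
++ * @packet_buff: CPU pointer to the same IB contents
++ * @size_in_bytes: size of the IB contents
++ */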
++static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
++ unsigned int pasid, uint64_t vmid0_address,
++ uint32_t *packet_buff, size_t size_in_bytes)
++{
++ struct pm4__release_mem *rm_packet;
++ struct pm4__indirect_buffer_pasid *ib_packet;
++ struct kfd_mem_obj *mem_obj;
++ size_t pq_packets_size_in_bytes;
++ union ULARGE_INTEGER *largep;
++ union ULARGE_INTEGER addr;
++ struct kernel_queue *kq;
++ uint64_t *rm_state;
++ unsigned int *ib_packet_buff;
++ int status;
++
++ BUG_ON(!dbgdev || !dbgdev->kq || !packet_buff || !size_in_bytes);
++
++ kq = dbgdev->kq;
++
++ pq_packets_size_in_bytes = sizeof(struct pm4__release_mem) +
++ sizeof(struct pm4__indirect_buffer_pasid);
++
++ /*
++ * We acquire a packet buffer from the DIQ: the caller's packets
++ * will sit in the indirect buffer (IB), while in the PQ we put
++ * the IB packet plus the sync packet(s).
++ */
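++ /*
++ * Resulting layout:
++ * PQ (ib_packet_buff): [INDIRECT_BUFFER_PASID pkt][RELEASE_MEM pkt]
++ * IB (vmid0_address): the caller-supplied packet_buff
++ */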
++ status = kq->ops.acquire_packet_buffer(kq,
++ pq_packets_size_in_bytes / sizeof(uint32_t),
++ &ib_packet_buff);
++ if (status != 0) {
++ pr_err("amdkfd: acquire_packet_buffer failed\n");
++ return status;
++ }
++
++ memset(ib_packet_buff, 0, pq_packets_size_in_bytes);
++
++ ib_packet = (struct pm4__indirect_buffer_pasid *) (ib_packet_buff);
++
++ ib_packet->header.count = 3;
++ ib_packet->header.opcode = IT_INDIRECT_BUFFER_PASID;
++ ib_packet->header.type = PM4_TYPE_3;
++
++ largep = (union ULARGE_INTEGER *) &vmid0_address;
++
++ ib_packet->bitfields2.ib_base_lo = largep->u.low_part >> 2;
++ ib_packet->bitfields3.ib_base_hi = largep->u.high_part;
++
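++ /*
++ * Control word: the low 20 bits carry the IB size in dwords;
++ * bits 23 and 31 are control flags of this packet format.
++ */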
++ ib_packet->control = (1 << 23) | (1 << 31) |
++ ((size_in_bytes / sizeof(uint32_t)) & 0xfffff);
++
++ ib_packet->bitfields5.pasid = pasid;
++
++ /*
++ * For now we use RELEASE_MEM for GPU-CPU synchronization
++ * (WaitRegMem + WriteData may be a better alternative).
++ * We take a GART allocation (GPU/CPU mapped) for the sync
++ * variable, and wait until:
++ * (a) we are in sync with the HW, and
++ * (b) the sync variable is written by the CP to memory.
++ */
++ rm_packet = (struct pm4__release_mem *) (ib_packet_buff +
++ (sizeof(struct pm4__indirect_buffer_pasid) /
++ sizeof(unsigned int)));
++
++ status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
++ &mem_obj);
++
++ if (status != 0) {
++ pr_err("amdkfd: Failed to allocate GART memory\n");
++ kq->ops.rollback_packet(kq);
++ return status;
++ }
++
++ rm_state = (uint64_t *) mem_obj->cpu_ptr;
++
++ *rm_state = QUEUESTATE__ACTIVE_COMPLETION_PENDING;
++
++ rm_packet->header.opcode = IT_RELEASE_MEM;
++ rm_packet->header.type = PM4_TYPE_3;
++ rm_packet->header.count = sizeof(struct pm4__release_mem) /
++ sizeof(unsigned int) - 2;
++
++ rm_packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
++ rm_packet->bitfields2.event_index =
++ event_index___release_mem__end_of_pipe;
++
++ rm_packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
++ rm_packet->bitfields2.atc = 0;
++ rm_packet->bitfields2.tc_wb_action_ena = 1;
++
++ addr.quad_part = mem_obj->gpu_addr;
++
++ rm_packet->bitfields4.address_lo_32b = addr.u.low_part >> 2;
++ rm_packet->address_hi = addr.u.high_part;
++
++ rm_packet->bitfields3.data_sel =
++ data_sel___release_mem__send_64_bit_data;
++
++ rm_packet->bitfields3.int_sel =
++ int_sel___release_mem__send_data_after_write_confirm;
++
++ rm_packet->bitfields3.dst_sel =
++ dst_sel___release_mem__memory_controller;
++
++ rm_packet->data_lo = QUEUESTATE__ACTIVE;
++
++ kq->ops.submit_packet(kq);
++
++ /* Wait until the CP writes the sync value: */
++ status = amdkfd_fence_wait_timeout(
++ (unsigned int *) rm_state,
++ QUEUESTATE__ACTIVE, 1500);
++
++ kfd_gtt_sa_free(dbgdev->dev, mem_obj);
++
++ return status;
++}
++
+ static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev)
+ {
+ BUG_ON(!dbgdev);
+@@ -117,6 +236,290 @@ static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
+ return status;
+ }
+
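++/*
++ * Translate a wave-control request into SQ_CMD and GRBM_GFX_INDEX
++ * register values; the output registers are written only on success.
++ */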
++static int dbgdev_wave_control_set_registers(
++ struct dbg_wave_control_info *wac_info,
++ union SQ_CMD_BITS *in_reg_sq_cmd,
++ union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
++{
++ int status = 0;
++ union SQ_CMD_BITS reg_sq_cmd;
++ union GRBM_GFX_INDEX_BITS reg_gfx_index;
++ struct HsaDbgWaveMsgAMDGen2 *pMsg;
++
++ BUG_ON(!wac_info || !in_reg_sq_cmd || !in_reg_gfx_index);
++
++ reg_sq_cmd.u32All = 0;
++ reg_gfx_index.u32All = 0;
++ pMsg = &wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2;
++
++ switch (wac_info->mode) {
++ /* Send command to single wave */
++ case HSA_DBG_WAVEMODE_SINGLE:
++ /*
++ * Limit access to the process waves only,
++ * by setting vmid check
++ */
++ reg_sq_cmd.bits.check_vmid = 1;
++ reg_sq_cmd.bits.simd_id = pMsg->ui32.SIMD;
++ reg_sq_cmd.bits.wave_id = pMsg->ui32.WaveId;
++ reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_SINGLE;
++
++ reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray;
++ reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine;
++ reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU;
++
++ break;
++
++ /* Send command to all waves with matching VMID */
++ case HSA_DBG_WAVEMODE_BROADCAST_PROCESS:
++
++ reg_gfx_index.bits.sh_broadcast_writes = 1;
++ reg_gfx_index.bits.se_broadcast_writes = 1;
++ reg_gfx_index.bits.instance_broadcast_writes = 1;
++
++ reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
++
++ break;
++
++ /* Send command to all CU waves with matching VMID */
++ case HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU:
++
++ reg_sq_cmd.bits.check_vmid = 1;
++ reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
++
++ reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray;
++ reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine;
++ reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU;
++
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ switch (wac_info->operand) {
++ case HSA_DBG_WAVEOP_HALT:
++ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_HALT;
++ break;
++
++ case HSA_DBG_WAVEOP_RESUME:
++ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_RESUME;
++ break;
++
++ case HSA_DBG_WAVEOP_KILL:
++ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
++ break;
++
++ case HSA_DBG_WAVEOP_DEBUG:
++ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_DEBUG;
++ break;
++
++ case HSA_DBG_WAVEOP_TRAP:
++ if (wac_info->trapId < MAX_TRAPID) {
++ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_TRAP;
++ reg_sq_cmd.bits.trap_id = wac_info->trapId;
++ } else {
++ status = -EINVAL;
++ }
++ break;
++
++ default:
++ status = -EINVAL;
++ break;
++ }
++
++ if (status == 0) {
++ *in_reg_sq_cmd = reg_sq_cmd;
++ *in_reg_gfx_index = reg_gfx_index;
++ }
++
++ return status;
++}
++
++static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
++ struct dbg_wave_control_info *wac_info)
++{
++
++ int status;
++ union SQ_CMD_BITS reg_sq_cmd;
++ union GRBM_GFX_INDEX_BITS reg_gfx_index;
++ struct kfd_mem_obj *mem_obj;
++ uint32_t *packet_buff_uint;
++ struct pm4__set_config_reg *packets_vec;
++ size_t ib_size = sizeof(struct pm4__set_config_reg) * 3;
++
++ BUG_ON(!dbgdev || !wac_info);
++
++ reg_sq_cmd.u32All = 0;
++
++ status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
++ &reg_gfx_index);
++ if (status) {
++ pr_err("amdkfd: Failed to set wave control registers\n");
++ return status;
++ }
++
++ /* we do not control the VMID in DIQ, so reset it to a known value */
++ reg_sq_cmd.bits.vm_id = 0;
++
++ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
++
++ pr_debug("\t\t mode is: %u\n", wac_info->mode);
++ pr_debug("\t\t operand is: %u\n", wac_info->operand);
++ pr_debug("\t\t trap id is: %u\n", wac_info->trapId);
++ pr_debug("\t\t msg value is: %u\n",
++ wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
++ pr_debug("\t\t vmid is: N/A\n");
++
++ pr_debug("\t\t chk_vmid is : %u\n", reg_sq_cmd.bitfields.check_vmid);
++ pr_debug("\t\t command is : %u\n", reg_sq_cmd.bitfields.cmd);
++ pr_debug("\t\t queue id is : %u\n", reg_sq_cmd.bitfields.queue_id);
++ pr_debug("\t\t simd id is : %u\n", reg_sq_cmd.bitfields.simd_id);
++ pr_debug("\t\t mode is : %u\n", reg_sq_cmd.bitfields.mode);
++ pr_debug("\t\t vm_id is : %u\n", reg_sq_cmd.bitfields.vm_id);
++ pr_debug("\t\t wave_id is : %u\n", reg_sq_cmd.bitfields.wave_id);
++
++ pr_debug("\t\t ibw is : %u\n",
++ reg_gfx_index.bitfields.instance_broadcast_writes);
++ pr_debug("\t\t ii is : %u\n",
++ reg_gfx_index.bitfields.instance_index);
++ pr_debug("\t\t sebw is : %u\n",
++ reg_gfx_index.bitfields.se_broadcast_writes);
++ pr_debug("\t\t se_ind is : %u\n", reg_gfx_index.bitfields.se_index);
++ pr_debug("\t\t sh_ind is : %u\n", reg_gfx_index.bitfields.sh_index);
++ pr_debug("\t\t sbw is : %u\n",
++ reg_gfx_index.bitfields.sh_broadcast_writes);
++
++ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
++
++ status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
++
++ if (status != 0) {
++ pr_err("amdkfd: Failed to allocate GART memory\n");
++ return status;
++ }
++
++ packet_buff_uint = mem_obj->cpu_ptr;
++
++ memset(packet_buff_uint, 0, ib_size);
++
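++ /*
++ * The IB holds three SET_*CONFIG_REG packets:
++ * [0] select the target SE/SH/CU via GRBM_GFX_INDEX,
++ * [1] write the wave command to SQ_CMD (the CP inserts the VMID),
++ * [2] restore GRBM_GFX_INDEX to broadcast writes.
++ */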
++ packets_vec = (struct pm4__set_config_reg *) packet_buff_uint;
++ packets_vec[0].header.count = 1;
++ packets_vec[0].header.opcode = IT_SET_UCONFIG_REG;
++ packets_vec[0].header.type = PM4_TYPE_3;
++ packets_vec[0].bitfields2.reg_offset =
++ GRBM_GFX_INDEX / (sizeof(uint32_t)) -
++ USERCONFIG_REG_BASE;
++
++ packets_vec[0].bitfields2.insert_vmid = 0;
++ packets_vec[0].reg_data[0] = reg_gfx_index.u32All;
++
++ packets_vec[1].header.count = 1;
++ packets_vec[1].header.opcode = IT_SET_CONFIG_REG;
++ packets_vec[1].header.type = PM4_TYPE_3;
++ packets_vec[1].bitfields2.reg_offset = SQ_CMD / (sizeof(uint32_t)) -
++ CONFIG_REG_BASE;
++
++ packets_vec[1].bitfields2.vmid_shift = SQ_CMD_VMID_OFFSET;
++ packets_vec[1].bitfields2.insert_vmid = 1;
++ packets_vec[1].reg_data[0] = reg_sq_cmd.u32All;
++
++ /* Restore the GRBM_GFX_INDEX register */
++
++ reg_gfx_index.u32All = 0;
++ reg_gfx_index.bits.sh_broadcast_writes = 1;
++ reg_gfx_index.bits.instance_broadcast_writes = 1;
++ reg_gfx_index.bits.se_broadcast_writes = 1;
++
++
++ packets_vec[2].ordinal1 = packets_vec[0].ordinal1;
++ packets_vec[2].bitfields2.reg_offset =
++ GRBM_GFX_INDEX / (sizeof(uint32_t)) -
++ USERCONFIG_REG_BASE;
++
++ packets_vec[2].bitfields2.insert_vmid = 0;
++ packets_vec[2].reg_data[0] = reg_gfx_index.u32All;
++
++ status = dbgdev_diq_submit_ib(
++ dbgdev,
++ wac_info->process->pasid,
++ mem_obj->gpu_addr,
++ packet_buff_uint,
++ ib_size);
++
++ if (status != 0)
++ pr_err("amdkfd: Failed to submit IB to DIQ\n");
++
++ kfd_gtt_sa_free(dbgdev->dev, mem_obj);
++
++ return status;
++}
++
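++/*
++ * Non-DIQ path: the driver writes SQ_CMD/GRBM_GFX_INDEX itself through
++ * the kfd2kgd wave_control_execute hook, patching in the VMID taken
++ * from the process's PDD.
++ */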
++static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
++ struct dbg_wave_control_info *wac_info)
++{
++ int status;
++ union SQ_CMD_BITS reg_sq_cmd;
++ union GRBM_GFX_INDEX_BITS reg_gfx_index;
++ struct kfd_process_device *pdd;
++
++ BUG_ON(!dbgdev || !dbgdev->dev || !wac_info);
++
++ reg_sq_cmd.u32All = 0;
++
++ /* Take the VMID for that process the safe way, via the PDD */
++ pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
++
++ if (!pdd) {
++ pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
++ return -EFAULT;
++ }
++ status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
++ &reg_gfx_index);
++ if (status) {
++ pr_err("amdkfd: Failed to set wave control registers\n");
++ return status;
++ }
++
++ /* For non-DIQ we need to patch the VMID: */
++
++ reg_sq_cmd.bits.vm_id = pdd->qpd.vmid;
++
++ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
++
++ pr_debug("\t\t mode is: %u\n", wac_info->mode);
++ pr_debug("\t\t operand is: %u\n", wac_info->operand);
++ pr_debug("\t\t trap id is: %u\n", wac_info->trapId);
++ pr_debug("\t\t msg value is: %u\n",
++ wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
++ pr_debug("\t\t vmid is: %u\n", pdd->qpd.vmid);
++
++ pr_debug("\t\t chk_vmid is : %u\n", reg_sq_cmd.bitfields.check_vmid);
++ pr_debug("\t\t command is : %u\n", reg_sq_cmd.bitfields.cmd);
++ pr_debug("\t\t queue id is : %u\n", reg_sq_cmd.bitfields.queue_id);
++ pr_debug("\t\t simd id is : %u\n", reg_sq_cmd.bitfields.simd_id);
++ pr_debug("\t\t mode is : %u\n", reg_sq_cmd.bitfields.mode);
++ pr_debug("\t\t vm_id is : %u\n", reg_sq_cmd.bitfields.vm_id);
++ pr_debug("\t\t wave_id is : %u\n", reg_sq_cmd.bitfields.wave_id);
++
++ pr_debug("\t\t ibw is : %u\n",
++ reg_gfx_index.bitfields.instance_broadcast_writes);
++ pr_debug("\t\t ii is : %u\n",
++ reg_gfx_index.bitfields.instance_index);
++ pr_debug("\t\t sebw is : %u\n",
++ reg_gfx_index.bitfields.se_broadcast_writes);
++ pr_debug("\t\t se_ind is : %u\n", reg_gfx_index.bitfields.se_index);
++ pr_debug("\t\t sh_ind is : %u\n", reg_gfx_index.bitfields.sh_index);
++ pr_debug("\t\t sbw is : %u\n",
++ reg_gfx_index.bitfields.sh_broadcast_writes);
++
++ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
++
++ return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->kgd,
++ reg_gfx_index.u32All,
++ reg_sq_cmd.u32All);
++}
++
+ void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
+ enum DBGDEV_TYPE type)
+ {
+@@ -131,11 +534,13 @@ void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
+ case DBGDEV_TYPE_NODIQ:
+ pdbgdev->dbgdev_register = dbgdev_register_nodiq;
+ pdbgdev->dbgdev_unregister = dbgdev_unregister_nodiq;
++ pdbgdev->dbgdev_wave_control = dbgdev_wave_control_nodiq;
+ break;
+ case DBGDEV_TYPE_DIQ:
+ default:
+ pdbgdev->dbgdev_register = dbgdev_register_diq;
+ pdbgdev->dbgdev_unregister = dbgdev_unregister_diq;
++ pdbgdev->dbgdev_wave_control = dbgdev_wave_control_diq;
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+index 959be98..7e2c8e2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+@@ -133,3 +133,19 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
+
+ return 0;
+ }
++
++long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
++ struct dbg_wave_control_info *wac_info)
++{
++ BUG_ON(!pmgr || !pmgr->dbgdev || !wac_info);
++
++ /* Is the request coming from the already-registered process? */
++ if (pmgr->pasid != wac_info->process->pasid) {
++ pr_debug("H/W debugger support was not registered for requester pasid %d\n",
++ wac_info->process->pasid);
++ return -EINVAL;
++ }
++
++ return (long) pmgr->dbgdev->dbgdev_wave_control(pmgr->dbgdev, wac_info);
++}
++
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+index 250cf88..6da9d36 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+@@ -268,6 +268,8 @@ struct kfd_dbgdev {
+ /* virtualized function pointers to device dbg */
+ int (*dbgdev_register)(struct kfd_dbgdev *dbgdev);
+ int (*dbgdev_unregister)(struct kfd_dbgdev *dbgdev);
++ int (*dbgdev_wave_control)(struct kfd_dbgdev *dbgdev,
++ struct dbg_wave_control_info *wac_info);
+
+ };
+
+@@ -283,5 +285,6 @@ void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr);
+ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev);
+ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
+ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
+-
++long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
++ struct dbg_wave_control_info *wac_info);
+ #endif /* KFD_DBGMGR_H_ */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index a5dc822..b08ec05 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -915,7 +915,7 @@ out:
+ return retval;
+ }
+
+-static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
++int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ unsigned int fence_value,
+ unsigned long timeout)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index a65a281..cd1f033 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -656,6 +656,10 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+ struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
+ unsigned int qid);
+
++int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
++ unsigned int fence_value,
++ unsigned long timeout);
++
+ /* Packet Manager */
+
+ #define KFD_HIQ_TIMEOUT (500)
+--
+1.9.1
+