Diffstat (limited to 'meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/1287-drm-amdkfd-Remove-BUG_ONs.patch')
-rw-r--r--  meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/1287-drm-amdkfd-Remove-BUG_ONs.patch  1186
1 file changed, 1186 insertions, 0 deletions
diff --git a/meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/1287-drm-amdkfd-Remove-BUG_ONs.patch b/meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/1287-drm-amdkfd-Remove-BUG_ONs.patch
new file mode 100644
index 00000000..c753aae0
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/1287-drm-amdkfd-Remove-BUG_ONs.patch
@@ -0,0 +1,1186 @@
+From d9466ef2137e57f394bfe9334b9a3dabaee74fcf Mon Sep 17 00:00:00 2001
+From: Kent Russell <kent.russell@amd.com>
+Date: Mon, 27 Mar 2017 10:43:45 -0400
+Subject: [PATCH 1287/4131] drm/amdkfd: Remove BUG_ONs
+
+For the most part, these are internal functions and we don't need to
+check the validity of the parameters. A BUG_ON would be essentially
+the same as a NULL pointer dereference anyway. Remove most of the
+BUG_ONs for simple parameter checking, and change the ones that matter
+to either WARN_ON (for void functions) or pr_err (for int functions).
+
+Change-Id: I7268b3c29bac5e87545a1a18ae5acdec8175b684
+Signed-off-by: Kent Russell <kent.russell@amd.com>
+
+ Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 16 -----
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 3 -
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 27 +++-----
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 71 +++-------------------
+ .../drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 6 +-
+ .../drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 6 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 6 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 24 +-------
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 15 -----
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 11 ----
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 19 +-----
+ drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 3 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 17 ++----
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 27 ++------
+ drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 2 -
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 22 +++----
+ 16 files changed, 44 insertions(+), 231 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index c69242f..f0ff2e0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -120,9 +120,6 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
+ static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+ {
+- BUG_ON(!dev);
+- BUG_ON(!cu);
+-
+ dev->node_props.cpu_cores_count = cu->num_cpu_cores;
+ dev->node_props.cpu_core_id_base = cu->processor_id_low;
+ #if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
+@@ -137,9 +134,6 @@ static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+ static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+ {
+- BUG_ON(!dev);
+- BUG_ON(!cu);
+-
+ dev->node_props.simd_id_base = cu->processor_id_low;
+ dev->node_props.simd_count = cu->num_simd_cores;
+ dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
+@@ -162,8 +156,6 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
+ {
+ struct kfd_topology_device *dev;
+
+- BUG_ON(!cu);
+-
+ pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
+ cu->proximity_domain, cu->hsa_capability);
+ list_for_each_entry(dev, device_list, list) {
+@@ -189,8 +181,6 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
+ struct kfd_mem_properties *props;
+ struct kfd_topology_device *dev;
+
+- BUG_ON(!mem);
+-
+ pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
+ mem->proximity_domain);
+ list_for_each_entry(dev, device_list, list) {
+@@ -242,8 +232,6 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+ uint32_t id;
+ uint32_t total_num_of_cu;
+
+- BUG_ON(!cache);
+-
+ id = cache->processor_id_low;
+
+ list_for_each_entry(dev, device_list, list) {
+@@ -309,8 +297,6 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ uint32_t id_from;
+ uint32_t id_to;
+
+- BUG_ON(!iolink);
+-
+ id_from = iolink->proximity_domain_from;
+ id_to = iolink->proximity_domain_to;
+
+@@ -383,8 +369,6 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ struct crat_subtype_iolink *iolink;
+ int ret = 0;
+
+- BUG_ON(!sub_type_hdr);
+-
+ switch (sub_type_hdr->type) {
+ case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
+ cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+index 7e44a86..d1157ff 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+@@ -64,9 +64,6 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
+ enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
+ struct kfd_dbgmgr *new_buff;
+
+- BUG_ON(!pdev);
+- BUG_ON(!pdev->init_complete);
+-
+ new_buff = kfd_alloc_struct(new_buff);
+ if (!new_buff) {
+ pr_err("Failed to allocate dbgmgr instance\n");
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 62ad717..45a234d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -272,7 +272,8 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
+
+ for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
+ if (supported_devices[i].did == did) {
+- BUG_ON(!supported_devices[i].device_info);
++ WARN(!supported_devices[i].device_info,
++ "Cannot look up device info, Device Info is NULL");
+ return supported_devices[i].device_info;
+ }
+ }
+@@ -302,8 +303,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ }
+ }
+
+- BUG_ON(!f2g);
+-
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+ if (!kfd)
+ return NULL;
+@@ -393,7 +392,8 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
+ flags);
+
+ dev = kfd_device_by_pci_dev(pdev);
+- BUG_ON(!dev);
++ if (WARN_ON(!dev))
++ return -ENODEV;
+
+ kfd_signal_iommu_event(dev, pasid, address,
+ flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
+@@ -633,8 +633,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
+
+ void kgd2kfd_suspend(struct kfd_dev *kfd)
+ {
+- BUG_ON(!kfd);
+-
+ if (!kfd->init_complete)
+ return;
+
+@@ -654,8 +652,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
+
+ int kgd2kfd_resume(struct kfd_dev *kfd)
+ {
+- BUG_ON(!kfd);
+-
+ if (!kfd->init_complete)
+ return 0;
+
+@@ -1003,17 +999,14 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+ {
+ unsigned int num_of_bits;
+
+- BUG_ON(!kfd);
+- BUG_ON(!kfd->gtt_mem);
+- BUG_ON(buf_size < chunk_size);
+- BUG_ON(buf_size == 0);
+- BUG_ON(chunk_size == 0);
+-
+ kfd->gtt_sa_chunk_size = chunk_size;
+ kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
+
+ num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
+- BUG_ON(num_of_bits == 0);
++ if (num_of_bits == 0) {
++ pr_err("Number of bits is 0 in %s", __func__);
++ return -EINVAL;
++ }
+
+ kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
+
+@@ -1054,8 +1047,6 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+ {
+ unsigned int found, start_search, cur_size;
+
+- BUG_ON(!kfd);
+-
+ if (size == 0)
+ return -EINVAL;
+
+@@ -1160,8 +1151,6 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+ {
+ unsigned int bit;
+
+- BUG_ON(!kfd);
+-
+ /* Act like kfree when trying to free a NULL object */
+ if (!mem_obj)
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 3d8fb00..172eddc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -83,7 +83,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
+
+ unsigned int get_queues_num(struct device_queue_manager *dqm)
+ {
+- BUG_ON(!dqm || !dqm->dev);
+ return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+ KGD_MAX_QUEUES);
+ }
+@@ -235,8 +234,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ {
+ int retval = 0;
+
+- BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
+-
+ print_queue(q);
+
+ mutex_lock(&dqm->lock);
+@@ -346,8 +343,6 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ int retval;
+ struct mqd_manager *mqd;
+
+- BUG_ON(!dqm || !q || !qpd);
+-
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd)
+ return -ENOMEM;
+@@ -401,8 +396,6 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+ int retval;
+ struct mqd_manager *mqd;
+
+- WARN_ON(!dqm || !q || !q->mqd || !qpd);
+-
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd)
+@@ -446,8 +439,6 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ {
+ int retval;
+
+- BUG_ON(!dqm || !q || !q->mqd || !qpd);
+-
+ mutex_lock(&dqm->lock);
+ retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
+ mutex_unlock(&dqm->lock);
+@@ -470,8 +461,6 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+
+ bool prev_active = false;
+
+- BUG_ON(!dqm || !q || !q->mqd);
+-
+ mutex_lock(&dqm->lock);
+
+ pdd = kfd_get_process_device_data(q->device, q->process);
+@@ -546,8 +535,6 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
+ {
+ struct mqd_manager *mqd;
+
+- BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
+-
+ pr_debug("mqd type %d\n", type);
+
+ mqd = dqm->mqds[type];
+@@ -569,8 +556,6 @@ int process_evict_queues(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+- BUG_ON(!dqm || !qpd);
+-
+ mutex_lock(&dqm->lock);
+ if (qpd->evicted++ > 0) /* already evicted, do nothing */
+ goto out;
+@@ -584,8 +569,9 @@ int process_evict_queues(struct device_queue_manager *dqm,
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd) { /* should not be here */
+- BUG();
+- continue;
++ pr_err("Cannot evict queue, mqd is NULL\n");
++ retval = -ENOMEM;
++ goto out;
+ }
+ /* if the queue is not active anyway, it is not evicted */
+ if (q->properties.is_active) {
+@@ -619,8 +605,6 @@ int process_restore_queues(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
+
+- BUG_ON(!dqm || !qpd);
+-
+ pdd = qpd_to_pdd(qpd);
+ /* Retrieve PD base */
+ pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+@@ -656,8 +640,9 @@ int process_restore_queues(struct device_queue_manager *dqm,
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd) { /* should not be here */
+- BUG();
+- continue;
++ pr_err("Cannot restore queue, mqd is NULL\n");
++ retval = -ENOMEM;
++ goto out_unlock;
+ }
+ if (q->properties.is_evicted) {
+ q->properties.is_evicted = false;
+@@ -692,8 +677,6 @@ static int register_process(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
+
+- BUG_ON(!dqm || !qpd);
+-
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+@@ -726,8 +709,6 @@ static int unregister_process(struct device_queue_manager *dqm,
+ int retval;
+ struct device_process_node *cur, *next;
+
+- BUG_ON(!dqm || !qpd);
+-
+ pr_debug("qpd->queues_list is %s\n",
+ list_empty(&qpd->queues_list) ? "empty" : "not empty");
+
+@@ -768,16 +749,12 @@ static void init_interrupts(struct device_queue_manager *dqm)
+ {
+ unsigned int i;
+
+- BUG_ON(!dqm);
+-
+ for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
+ if (is_pipe_enabled(dqm, 0, i))
+ dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
+ }
+ static int init_scheduler(struct device_queue_manager *dqm)
+ {
+- BUG_ON(!dqm);
+-
+ return 0;
+ }
+
+@@ -785,8 +762,6 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
+ {
+ int i;
+
+- BUG_ON(!dqm);
+-
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
+
+ dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
+@@ -813,10 +788,6 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
+ {
+ int i;
+
+- BUG_ON(!dqm);
+-
+- BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+-
+ kfree(dqm->allocated_queues);
+ for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+ kfree(dqm->mqds[i]);
+@@ -916,8 +887,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
+ int i, mec;
+ struct scheduling_resources res;
+
+- BUG_ON(!dqm);
+-
+ res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
+
+ res.queue_mask = 0;
+@@ -957,8 +926,6 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ {
+ int retval;
+
+- BUG_ON(!dqm);
+-
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
+
+ mutex_init(&dqm->lock);
+@@ -982,8 +949,6 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ {
+ int retval;
+
+- BUG_ON(!dqm);
+-
+ retval = 0;
+
+ retval = pm_init(&dqm->packets, dqm, dqm->dev->mec_fw_version);
+@@ -1022,8 +987,6 @@ static int start_cpsch(struct device_queue_manager *dqm)
+
+ static int stop_cpsch(struct device_queue_manager *dqm)
+ {
+- BUG_ON(!dqm);
+-
+ mutex_lock(&dqm->lock);
+
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
+@@ -1040,8 +1003,6 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+ {
+- BUG_ON(!dqm || !kq || !qpd);
+-
+ mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("Can't create new kernel queue because %d queues were already created\n",
+@@ -1071,8 +1032,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+ {
+- BUG_ON(!dqm || !kq);
+-
+ mutex_lock(&dqm->lock);
+ /* here we actually preempt the DIQ */
+ list_del(&kq->list);
+@@ -1095,8 +1054,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ int retval;
+ struct mqd_manager *mqd;
+
+- BUG_ON(!dqm || !q || !qpd);
+-
+ retval = 0;
+
+ if (allocate_vmid)
+@@ -1187,8 +1144,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ {
+ unsigned long end_jiffies;
+
+- BUG_ON(!fence_addr);
+-
+ end_jiffies = (timeout_ms * HZ / 1000) + jiffies;
+
+ while (*fence_addr != fence_value) {
+@@ -1242,8 +1197,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ int retval;
+
+- BUG_ON(!dqm);
+-
+ retval = 0;
+
+ if (!dqm->active_runlist)
+@@ -1286,8 +1239,6 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ int retval;
+ enum kfd_unmap_queues_filter filter;
+
+- BUG_ON(!dqm);
+-
+ filter = static_queues_included ?
+ KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
+@@ -1311,8 +1262,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ struct mqd_manager *mqd;
+ bool preempt_all_queues;
+
+- BUG_ON(!dqm || !qpd || !q);
+-
+ preempt_all_queues = false;
+
+ retval = 0;
+@@ -1560,8 +1509,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ {
+ struct device_queue_manager *dqm;
+
+- BUG_ON(!dev);
+-
+ pr_debug("Loading device queue manager\n");
+
+ dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
+@@ -1616,8 +1563,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ dqm->ops.process_termination = process_termination_nocpsch;
+ break;
+ default:
+- BUG();
+- break;
++ pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
++ return NULL;
+ }
+
+ switch (dev->device_info->asic_family) {
+@@ -1655,8 +1602,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+
+ void device_queue_manager_uninit(struct device_queue_manager *dqm)
+ {
+- BUG_ON(!dqm);
+-
+ dqm->ops.uninitialize(dqm);
+ kfree(dqm);
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+index f051e0d..8e1eb24 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+@@ -81,7 +81,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+ * for LDS/Scratch and GPUVM.
+ */
+
+- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
++ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ top_address_nybble == 0);
+
+ return PRIVATE_BASE(top_address_nybble << 12) |
+@@ -120,8 +120,6 @@ static int update_qpd_cik(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+- BUG_ON(!dqm || !qpd);
+-
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+@@ -156,8 +154,6 @@ static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+- BUG_ON(!dqm || !qpd);
+-
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+index f6c6bea..ac8d852 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+@@ -93,7 +93,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+ * for LDS/Scratch and GPUVM.
+ */
+
+- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
++ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ top_address_nybble == 0);
+
+ return top_address_nybble << 12 |
+@@ -163,8 +163,6 @@ static int update_qpd_vi(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+- BUG_ON(!dqm || !qpd);
+-
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+@@ -206,8 +204,6 @@ static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+- BUG_ON(!dqm || !qpd);
+-
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index 899fc4c..f4833b2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -94,7 +94,7 @@ void kfd_doorbell_init(struct kfd_dev *kfd)
+ kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
+ kfd_doorbell_process_slice(kfd));
+
+- BUG_ON(!kfd->doorbell_kernel_ptr);
++ WARN_ON(!kfd->doorbell_kernel_ptr);
+
+ pr_debug("Doorbell initialization:\n");
+ pr_debug("doorbell base == 0x%08lX\n",
+@@ -159,8 +159,6 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ {
+ u32 inx;
+
+- BUG_ON(!kfd || !doorbell_off);
+-
+ mutex_lock(&kfd->doorbell_mutex);
+ inx = find_first_zero_bit(kfd->doorbell_available_index,
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+@@ -193,8 +191,6 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
+ {
+ unsigned int inx;
+
+- BUG_ON(!kfd || !db_addr);
+-
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+
+ mutex_lock(&kfd->doorbell_mutex);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 17c4a71..34099e2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -41,9 +41,6 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ int retval;
+ union PM4_MES_TYPE_3_HEADER nop;
+
+- BUG_ON(!kq || !dev);
+- BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
+-
+ pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
+ queue_size);
+
+@@ -63,8 +60,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ KFD_MQD_TYPE_HIQ);
+ break;
+ default:
+- BUG();
+- break;
++ pr_err("Invalid queue type %d\n", type);
++ return false;
+ }
+
+ if (!kq->mqd)
+@@ -182,8 +179,6 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+
+ static void uninitialize(struct kernel_queue *kq)
+ {
+- BUG_ON(!kq);
+-
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ kq->mqd->destroy_mqd(kq->mqd,
+ NULL,
+@@ -214,8 +209,6 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
+ uint64_t wptr64;
+ unsigned int *queue_address;
+
+- BUG_ON(!kq || !buffer_ptr);
+-
+ /* When rptr == wptr, the buffer is empty.
+ * When rptr == wptr + 1, the buffer is full.
+ * It is always rptr that advances to the position of wptr, rather than
+@@ -304,11 +297,7 @@ static void submit_packet(struct kernel_queue *kq)
+ {
+ #ifdef DEBUG
+ int i;
+-#endif
+
+- BUG_ON(!kq);
+-
+-#ifdef DEBUG
+ for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
+ pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
+ if (i % 15 == 0)
+@@ -322,7 +311,6 @@ static void submit_packet(struct kernel_queue *kq)
+
+ static void rollback_packet(struct kernel_queue *kq)
+ {
+- BUG_ON(!kq);
+ kq->pending_wptr = *kq->queue->properties.write_ptr;
+ }
+
+@@ -331,8 +319,6 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ {
+ struct kernel_queue *kq;
+
+- BUG_ON(!dev);
+-
+ kq = kzalloc(sizeof(*kq), GFP_KERNEL);
+ if (!kq)
+ return NULL;
+@@ -373,8 +359,6 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+
+ void kernel_queue_uninit(struct kernel_queue *kq)
+ {
+- BUG_ON(!kq);
+-
+ kq->ops.uninitialize(kq);
+ kfree(kq);
+ }
+@@ -385,15 +369,11 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
+ uint32_t *buffer, i;
+ int retval;
+
+- BUG_ON(!dev);
+-
+ pr_err("Starting kernel queue test\n");
+
+ kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
+- BUG_ON(!kq);
+
+ retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
+- BUG_ON(retval != 0);
+ for (i = 0; i < 5; i++)
+ buffer[i] = kq->nop_packet;
+ kq->ops.submit_packet(kq);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 5ada92a..4b4de78 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -176,8 +176,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ int retval;
+ struct cik_sdma_rlc_registers *m;
+
+- BUG_ON(!mm || !mqd || !mqd_mem_obj);
+-
+ retval = kfd_gtt_sa_allocate(mm->dev,
+ sizeof(struct cik_sdma_rlc_registers),
+ mqd_mem_obj);
+@@ -201,14 +199,12 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+ {
+- BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+ }
+
+ static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+ {
+- BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+ }
+
+@@ -239,8 +235,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ {
+ struct cik_mqd *m;
+
+- BUG_ON(!mm || !q || !mqd);
+-
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE;
+@@ -298,8 +292,6 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ {
+ struct cik_sdma_rlc_registers *m;
+
+- BUG_ON(!mm || !mqd || !q);
+-
+ m = get_sdma_mqd(mqd);
+ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+@@ -382,8 +374,6 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ struct cik_mqd *m;
+ int retval;
+
+- BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
+-
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+ mqd_mem_obj);
+
+@@ -437,8 +427,6 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+ {
+ struct cik_mqd *m;
+
+- BUG_ON(!mm || !q || !mqd);
+-
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE |
+@@ -499,9 +487,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ {
+ struct mqd_manager *mqd;
+
+- BUG_ON(!dev);
+- BUG_ON(type >= KFD_MQD_TYPE_MAX);
+-
+ mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ if (!mqd)
+ return NULL;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index 80e3e0e..c1d35f4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -199,8 +199,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ {
+ struct vi_mqd *m;
+
+- BUG_ON(!mm || !q || !mqd);
+-
+ m = get_mqd(mqd);
+
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
+@@ -299,7 +297,6 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+ {
+- BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+ }
+
+@@ -352,8 +349,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ struct vi_sdma_mqd *m;
+
+
+- BUG_ON(!mm || !mqd || !mqd_mem_obj);
+-
+ retval = kfd_gtt_sa_allocate(mm->dev,
+ sizeof(struct vi_sdma_mqd),
+ mqd_mem_obj);
+@@ -377,7 +372,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+ {
+- BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+ }
+
+@@ -395,8 +389,6 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ {
+ struct vi_sdma_mqd *m;
+
+- BUG_ON(!mm || !mqd || !q);
+-
+ m = get_sdma_mqd(mqd);
+ m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+@@ -473,9 +465,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ {
+ struct mqd_manager *mqd;
+
+- BUG_ON(!dev);
+- BUG_ON(type >= KFD_MQD_TYPE_MAX);
+-
+ mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ if (!mqd)
+ return NULL;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index b2a3b8d..3949b22 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -33,7 +33,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
+ {
+ unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
+
+- BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
++ WARN_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
++
+ *wptr = temp;
+ }
+
+@@ -47,8 +48,6 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+
+ struct kfd_dev *dev = pm->dqm->dev;
+
+- BUG_ON(!pm || !rlib_size || !over_subscription || !dev);
+-
+ process_count = pm->dqm->processes_count;
+ queue_count = pm->dqm->queue_count;
+ compute_queue_count = queue_count - pm->dqm->sdma_queue_count;
+@@ -93,10 +92,6 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
+ {
+ int retval;
+
+- BUG_ON(!pm);
+- BUG_ON(pm->allocated);
+- BUG_ON(!is_over_subscription);
+-
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+
+ mutex_lock(&pm->lock);
+@@ -135,8 +130,6 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ struct kernel_queue *kq;
+ bool is_over_subscription = false;
+
+- BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
+-
+ rl_wptr = retval = proccesses_mapped = 0;
+
+ retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
+@@ -224,8 +217,6 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+ uint16_t fw_ver)
+ {
+- BUG_ON(!dqm);
+-
+ pm->dqm = dqm;
+ mutex_init(&pm->lock);
+ pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+@@ -258,8 +249,6 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+
+ void pm_uninit(struct packet_manager *pm)
+ {
+- BUG_ON(!pm);
+-
+ mutex_destroy(&pm->lock);
+ kernel_queue_uninit(pm->priv_queue);
+ }
+@@ -296,8 +285,6 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+ size_t rl_ib_size, packet_size_dwords;
+ int retval;
+
+- BUG_ON(!pm || !dqm_queues);
+-
+ retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
+ &rl_ib_size);
+ if (retval != 0)
+@@ -386,8 +373,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+
+ void pm_release_ib(struct packet_manager *pm)
+ {
+- BUG_ON(!pm);
+-
+ mutex_lock(&pm->lock);
+ if (pm->allocated) {
+ kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+index b3f7d43..595d35d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+@@ -92,6 +92,7 @@ unsigned int kfd_pasid_alloc(void)
+
+ void kfd_pasid_free(unsigned int pasid)
+ {
+- BUG_ON(pasid == 0 || pasid >= pasid_limit);
++ if (WARN_ON(pasid == 0 || pasid >= pasid_limit))
++ return;
+ clear_bit(pasid, pasid_bitmap);
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 165591f..36a678a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -210,8 +210,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
+
+ struct task_struct *thread = current;
+
+- BUG_ON(!kfd_process_wq);
+-
+ if (!thread->mm)
+ return ERR_PTR(-EINVAL);
+
+@@ -409,7 +407,8 @@ static void kfd_process_ref_release(struct kref *ref)
+ {
+ struct kfd_process *p = container_of(ref, struct kfd_process, ref);
+
+- BUG_ON(!kfd_process_wq);
++ if (WARN_ON(!kfd_process_wq))
++ return;
+
+ INIT_WORK(&p->release_work, kfd_process_wq_release);
+ queue_work(kfd_process_wq, &p->release_work);
+@@ -435,7 +434,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ * mmu_notifier srcu is read locked
+ */
+ p = container_of(mn, struct kfd_process, mmu_notifier);
+- BUG_ON(p->mm != mm);
++ if (WARN_ON(p->mm != mm))
++ return;
+
+ cancel_delayed_work_sync(&p->eviction_work.dwork);
+ cancel_delayed_work_sync(&p->restore_work);
+@@ -818,8 +818,6 @@ void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
+ struct kfd_process *p;
+ struct kfd_process_device *pdd;
+
+- BUG_ON(!dev);
+-
+ /*
+ * Look for the process that matches the pasid. If there is no such
+ * process, we either released it in amdkfd's own notifier, or there
+@@ -892,9 +890,6 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ struct kfd_bo *buf_obj;
+ struct kfd_process *p;
+
+- BUG_ON(!pdd);
+- BUG_ON(!mem);
+-
+ p = pdd->process;
+
+ buf_obj = kzalloc(sizeof(*buf_obj), GFP_KERNEL);
+@@ -928,8 +923,6 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ struct kfd_bo *kfd_process_device_find_bo(struct kfd_process_device *pdd,
+ int handle)
+ {
+- BUG_ON(!pdd);
+-
+ if (handle < 0)
+ return NULL;
+
+@@ -984,8 +977,6 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
+ struct kfd_bo *buf_obj;
+ struct kfd_process *p;
+
+- BUG_ON(!pdd);
+-
+ p = pdd->process;
+
+ if (handle < 0)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 9fbcb63..f249f96 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -32,8 +32,6 @@ static inline struct process_queue_node *get_queue_by_qid(
+ {
+ struct process_queue_node *pqn;
+
+- BUG_ON(!pqm);
+-
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if ((pqn->q && pqn->q->properties.queue_id == qid) ||
+ (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
+@@ -48,8 +46,6 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
+ {
+ unsigned long found;
+
+- BUG_ON(!pqm || !qid);
+-
+ found = find_first_zero_bit(pqm->queue_slot_bitmap,
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+
+@@ -100,8 +96,6 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
+
+ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
+ {
+- BUG_ON(!pqm);
+-
+ INIT_LIST_HEAD(&pqm->queues);
+ pqm->queue_slot_bitmap =
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+@@ -117,8 +111,6 @@ void pqm_uninit(struct process_queue_manager *pqm)
+ {
+ struct process_queue_node *pqn, *next;
+
+- BUG_ON(!pqm);
+-
+ list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
+ uninit_queue(pqn->q);
+ list_del(&pqn->process_queue_list);
+@@ -170,8 +162,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ struct queue *cur;
+ enum kfd_queue_type type = properties->type;
+
+- BUG_ON(!pqm || !dev || !properties || !qid);
+-
+ q = NULL;
+ kq = NULL;
+
+@@ -262,8 +252,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ kq, &pdd->qpd);
+ break;
+ default:
+- BUG();
+- break;
++ pr_err("Invalid queue type %d\n", type);
++ return -EINVAL;
+ }
+
+ if (retval != 0) {
+@@ -311,8 +301,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ int retval;
+
+ dqm = NULL;
+-
+- BUG_ON(!pqm);
+ retval = 0;
+
+ pqn = get_queue_by_qid(pqm, qid);
+@@ -326,7 +314,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ dev = pqn->kq->dev;
+ if (pqn->q)
+ dev = pqn->q->device;
+- BUG_ON(!dev);
++ if (!dev) {
++ pr_err("Cannot destroy queue, kfd device is NULL\n");
++ return -ENODEV;
++ }
+
+ pdd = kfd_get_process_device_data(dev, pqm->process);
+ if (!pdd) {
+@@ -370,8 +361,6 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+ int retval;
+ struct process_queue_node *pqn;
+
+- BUG_ON(!pqm);
+-
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("No queue %d exists for update operation\n", qid);
+@@ -397,8 +386,6 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ int retval;
+ struct process_queue_node *pqn;
+
+- BUG_ON(!pqm);
+-
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("No queue %d exists for update operation\n", qid);
+@@ -427,8 +414,6 @@ struct kernel_queue *pqm_get_kernel_queue(
+ {
+ struct process_queue_node *pqn;
+
+- BUG_ON(!pqm);
+-
+ pqn = get_queue_by_qid(pqm, qid);
+ if (pqn && pqn->kq)
+ return pqn->kq;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+index 5ad9f6f..a5315d4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -67,8 +67,6 @@ int init_queue(struct queue **q, const struct queue_properties *properties)
+ {
+ struct queue *tmp_q;
+
+- BUG_ON(!q);
+-
+ tmp_q = kzalloc(sizeof(*tmp_q), GFP_KERNEL);
+ if (!tmp_q)
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 83e7d36..fc83141 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -143,8 +143,6 @@ static void kfd_release_topology_device(struct kfd_topology_device *dev)
+ struct kfd_perf_properties *perf;
+ #endif
+
+- BUG_ON(!dev);
+-
+ list_del(&dev->list);
+
+ while (dev->mem_props.next != &dev->mem_props) {
+@@ -555,8 +553,6 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
+ struct kfd_perf_properties *perf;
+ #endif
+
+- BUG_ON(!dev);
+-
+ if (dev->kobj_iolink) {
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ if (iolink->kobj) {
+@@ -628,12 +624,14 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ int ret;
+ uint32_t i;
+
+- BUG_ON(!dev);
++ if (dev->kobj_node) {
++ pr_err("Cannot build sysfs node entry, kobj_node is not NULL\n");
++ return -EINVAL;
++ }
+
+ /*
+ * Creating the sysfs folders
+ */
+- BUG_ON(dev->kobj_node);
+ dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
+ if (!dev->kobj_node)
+ return -ENOMEM;
+@@ -1162,8 +1160,6 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+ struct kfd_topology_device *dev;
+ struct kfd_topology_device *out_dev = NULL;
+
+- BUG_ON(!gpu);
+-
+ down_write(&topology_lock);
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (!dev->gpu && (dev->node_props.simd_count > 0)) {
+@@ -1233,8 +1229,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ size_t image_size = 0;
+ int proximity_domain;
+
+- BUG_ON(!gpu);
+-
+ INIT_LIST_HEAD(&temp_topology_device_list);
+
+ gpu_id = kfd_generate_gpu_id(gpu);
+@@ -1279,7 +1273,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
+ gpu_id, res);
+ dev = kfd_assign_gpu(gpu);
+- BUG_ON(!dev);
++ if (!dev) {
++ pr_err("Could not assign GPU\n");
++ res = -ENODEV;
++ goto err;
++ }
+ }
+
+ dev->gpu_id = gpu_id;
+@@ -1361,8 +1359,6 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
+ uint32_t gpu_id;
+ int res = -ENODEV;
+
+- BUG_ON(!gpu);
+-
+ down_write(&topology_lock);
+
+ list_for_each_entry(dev, &topology_device_list, list)
+--
+2.7.4
+