Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/1284-drm-amdkfd-Change-x-NULL-false-references-to-x.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.14.71/1284-drm-amdkfd-Change-x-NULL-false-references-to-x.patch  1054
1 file changed, 1054 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/1284-drm-amdkfd-Change-x-NULL-false-references-to-x.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/1284-drm-amdkfd-Change-x-NULL-false-references-to-x.patch
new file mode 100644
index 00000000..682350c2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/1284-drm-amdkfd-Change-x-NULL-false-references-to-x.patch
@@ -0,0 +1,1054 @@
+From a0c2de4c7cd2ed840d5f7ac01f79d1f26ebea969 Mon Sep 17 00:00:00 2001
+From: Kent Russell <kent.russell@amd.com>
+Date: Mon, 12 Dec 2016 09:15:31 -0500
+Subject: [PATCH 1284/4131] drm/amdkfd: Change x==NULL/false references to !x
+
+Upstream prefers the !x notation over x == NULL or x == false. Along the same
+lines, change the == true and != NULL references as well, and make the existing
+!x references consistent by dropping unnecessary parentheses for readability.
+
+Change-Id: Icee0460ad3deb3081880948a389aa3557406c01c
+Signed-off-by: Kent Russell <kent.russell@amd.com>
+
+ Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 56 +++++++++++-----------
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 10 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 16 +++----
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 19 ++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 9 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 10 ++--
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 18 +++----
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 8 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_ipc.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 6 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 10 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 30 ++++++------
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 6 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 16 +++----
+ 18 files changed, 112 insertions(+), 118 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 91e977b..a73cd35 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -285,7 +285,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+
+ pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
+- if (dev == NULL) {
++ if (!dev) {
+ pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
+ return -EINVAL;
+ }
+@@ -479,7 +479,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
+ }
+
+ dev = kfd_device_by_id(args->gpu_id);
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ down_write(&p->lock);
+@@ -520,7 +520,7 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
+ struct kfd_process_device *pdd;
+
+ dev = kfd_device_by_id(args->gpu_id);
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ down_write(&p->lock);
+@@ -572,7 +572,7 @@ kfd_ioctl_dbg_register(struct file *filep, struct kfd_process *p, void *data)
+ return PTR_ERR(pdd);
+ }
+
+- if (dev->dbgmgr == NULL) {
++ if (!dev->dbgmgr) {
+ /* In case of a legal call, we have no dbgmgr yet */
+
+ create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
+@@ -920,9 +920,9 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
+ "scratch_limit %llX\n", pdd->scratch_limit);
+
+ args->num_of_nodes++;
+- } while ((pdd = kfd_get_next_process_device_data(p, pdd)) !=
+- NULL &&
+- (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
++
++ pdd = kfd_get_next_process_device_data(p, pdd);
++ } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+ }
+
+ up_write(&p->lock);
+@@ -956,8 +956,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ pdd = kfd_get_first_process_device_data(p);
+ do {
+ args->num_of_nodes++;
+- } while ((pdd =
+- kfd_get_next_process_device_data(p, pdd)) != NULL);
++ pdd = kfd_get_next_process_device_data(p, pdd);
++ } while (pdd);
+
+ up_write(&p->lock);
+ return 0;
+@@ -1007,9 +1007,9 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ dev_dbg(kfd_device,
+ "scratch_limit %llX\n", pdd->scratch_limit);
+ nodes++;
+- } while (
+- (pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
+- (nodes < args->num_of_nodes));
++
++ pdd = kfd_get_next_process_device_data(p, pdd);
++ } while (pdd && (nodes < args->num_of_nodes));
+ up_write(&p->lock);
+
+ args->num_of_nodes = nodes;
+@@ -1127,7 +1127,7 @@ static int kfd_ioctl_alloc_scratch_memory(struct file *filep,
+ return -EINVAL;
+
+ dev = kfd_device_by_id(args->gpu_id);
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ down_write(&p->lock);
+@@ -1193,7 +1193,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ return -EINVAL;
+
+ dev = kfd_device_by_id(args->gpu_id);
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ down_write(&p->lock);
+@@ -1252,7 +1252,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
+ int ret;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ down_write(&p->lock);
+@@ -1266,7 +1266,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
+
+ buf_obj = kfd_process_device_find_bo(pdd,
+ GET_IDR_HANDLE(args->handle));
+- if (buf_obj == NULL) {
++ if (!buf_obj) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+@@ -1323,7 +1323,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ uint32_t *devices_arr = NULL;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ if (args->device_ids_array_size > 0 &&
+@@ -1359,7 +1359,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ GET_IDR_HANDLE(args->handle));
+ up_write(&p->lock);
+
+- if (mem == NULL) {
++ if (!mem) {
+ err = PTR_ERR(mem);
+ goto get_mem_obj_from_handle_failed;
+ }
+@@ -1431,7 +1431,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ uint32_t *devices_arr = NULL, num_dev, i;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ if (args->device_ids_array_size > 0 &&
+@@ -1468,7 +1468,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ GET_IDR_HANDLE(args->handle));
+ up_write(&p->lock);
+
+- if (mem == NULL) {
++ if (!mem) {
+ err = PTR_ERR(mem);
+ goto get_mem_obj_from_handle_failed;
+ }
+@@ -1516,7 +1516,7 @@ static int kfd_ioctl_open_graphic_handle(struct file *filep,
+ long err;
+
+ dev = kfd_device_by_id(args->gpu_id);
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ if (dev->device_info->asic_family != CHIP_KAVERI) {
+@@ -1570,7 +1570,7 @@ static int kfd_ioctl_set_process_dgpu_aperture(struct file *filep,
+ long err;
+
+ dev = kfd_device_by_id(args->gpu_id);
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+
+ down_write(&p->lock);
+@@ -1777,7 +1777,7 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ args->dst_mem_array_size),
+ sizeof(struct kfd_memory_range),
+ GFP_KERNEL);
+- if (src_array == NULL)
++ if (!src_array)
+ return -ENOMEM;
+ dst_array = &src_array[args->src_mem_array_size];
+
+@@ -1796,14 +1796,14 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+
+ /* Get remote process */
+ remote_pid = find_get_pid(args->pid);
+- if (remote_pid == NULL) {
++ if (!remote_pid) {
+ pr_err("Cross mem copy failed. Invalid PID %d\n", args->pid);
+ err = -ESRCH;
+ goto copy_from_user_fail;
+ }
+
+ remote_task = get_pid_task(remote_pid, PIDTYPE_PID);
+- if (remote_pid == NULL) {
++ if (!remote_pid) {
+ pr_err("Cross mem copy failed. Invalid PID or task died %d\n",
+ args->pid);
+ err = -ESRCH;
+@@ -1824,7 +1824,7 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ }
+
+ remote_p = kfd_get_process(remote_task);
+- if (remote_p == NULL) {
++ if (!remote_p) {
+ pr_err("Cross mem copy failed. Invalid kfd process %d\n",
+ args->pid);
+ err = -EINVAL;
+@@ -1857,7 +1857,7 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ dst_va_addr,
+ dst_va_addr + dst_array[0].size - 1);
+ up_read(&dst_p->lock);
+- if (dst_bo == NULL) {
++ if (!dst_bo) {
+ err = -EFAULT;
+ goto kfd_process_fail;
+ }
+@@ -1873,7 +1873,7 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ src_array[i].va_addr,
+ src_va_addr_end);
+ up_read(&src_p->lock);
+- if (src_bo == NULL || src_va_addr_end > src_bo->it.last) {
++ if (!src_bo || src_va_addr_end > src_bo->it.last) {
+ pr_err("Cross mem copy failed. Invalid range\n");
+ err = -EFAULT;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 26f6e72..c69242f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -196,7 +196,7 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
+ list_for_each_entry(dev, device_list, list) {
+ if (mem->proximity_domain == dev->proximity_domain) {
+ props = kfd_alloc_struct(props);
+- if (props == NULL)
++ if (!props)
+ return -ENOMEM;
+
+ /* We're on GPU node */
+@@ -265,7 +265,7 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+ id < dev->node_props.simd_id_base +
+ total_num_of_cu)) {
+ props = kfd_alloc_struct(props);
+- if (props == NULL)
++ if (!props)
+ return -ENOMEM;
+
+ props->processor_id_low = id;
+@@ -319,7 +319,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ list_for_each_entry(dev, device_list, list) {
+ if (id_from == dev->proximity_domain) {
+ props = kfd_alloc_struct(props);
+- if (props == NULL)
++ if (!props)
+ return -ENOMEM;
+
+ props->node_from = id_from;
+@@ -894,7 +894,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
+ uint32_t entries = 0;
+ #endif
+
+- if (pcrat_image == NULL || avail_size < VCRAT_SIZE_FOR_CPU)
++ if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
+ return -EINVAL;
+
+ /* Fill in CRAT Header.
+@@ -1088,7 +1088,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ #endif
+ struct kfd_local_mem_info local_mem_info;
+
+- if (pcrat_image == NULL || avail_size < VCRAT_SIZE_FOR_GPU)
++ if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
+ return -EINVAL;
+
+ /* Fill the CRAT Header.
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index 07c42d2..8acdfcd 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -64,8 +64,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
+ union ULARGE_INTEGER *largep;
+ union ULARGE_INTEGER addr;
+
+- if ((kq == NULL) || (packet_buff == NULL) ||
+- (size_in_bytes == 0)) {
++ if (!kq || !packet_buff || (size_in_bytes == 0)) {
+ pr_err("Illegal packet parameters\n");
+ return -EINVAL;
+ }
+@@ -167,7 +166,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
+ status = amdkfd_fence_wait_timeout((unsigned int *) rm_state,
+ QUEUESTATE__ACTIVE, 1500);
+
+- if (rm_state != NULL)
++ if (rm_state)
+ kfd_gtt_sa_free(dbgdev->dev, mem_obj);
+
+ return status;
+@@ -212,7 +211,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
+
+ kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
+
+- if (kq == NULL) {
++ if (!kq) {
+ pr_err("Error getting Kernel Queue\n");
+ return -ENOMEM;
+ }
+@@ -234,8 +233,7 @@ static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
+ /* todo - if needed, kill wavefronts and disable watch */
+ int status = 0;
+
+- if ((dbgdev == NULL) || (dbgdev->pqm == NULL) ||
+- (dbgdev->kq == NULL)) {
++ if (!dbgdev || !dbgdev->pqm || !dbgdev->kq) {
+ pr_err("Can't destroy diq\n");
+ status = -EFAULT;
+ } else {
+@@ -261,7 +259,7 @@ static void dbgdev_address_watch_set_registers(
+ addrLo->u32All = 0;
+ cntl->u32All = 0;
+
+- if (adw_info->watch_mask != NULL)
++ if (adw_info->watch_mask)
+ cntl->bitfields.mask =
+ (uint32_t) (adw_info->watch_mask[index] &
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
+@@ -278,7 +276,7 @@ static void dbgdev_address_watch_set_registers(
+ cntl->bitfields.mode = adw_info->watch_mode[index];
+ cntl->bitfields.vmid = (uint32_t) vmid;
+ /* for APU assume it is an ATC address. */
+- if (KFD_IS_DGPU(asic_family) == false)
++ if (!KFD_IS_DGPU(asic_family))
+ cntl->u32All |= ADDRESS_WATCH_REG_CNTL_ATC_BIT;
+ pr_debug("\t\t%20s %08x\n", "set reg mask :", cntl->bitfields.mask);
+ pr_debug("\t\t%20s %08x\n", "set reg add high :",
+@@ -318,7 +316,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
+ vmid = pdd->qpd.vmid;
+
+ if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
+- (adw_info->num_watch_points == 0) || (adw_info->watch_mode == NULL))
++ (adw_info->num_watch_points == 0) || !adw_info->watch_mode)
+ return -EINVAL;
+
+ for (i = 0; i < adw_info->num_watch_points; i++) {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+index 6a92a74..7e44a86 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+@@ -52,7 +52,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
+
+ void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
+ {
+- if (pmgr != NULL) {
++ if (pmgr) {
+ kfd_dbgmgr_uninitialize(pmgr);
+ kfree(pmgr);
+ pmgr = NULL;
+@@ -64,7 +64,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
+ enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
+ struct kfd_dbgmgr *new_buff;
+
+- BUG_ON(pdev == NULL);
++ BUG_ON(!pdev);
+ BUG_ON(!pdev->init_complete);
+
+ new_buff = kfd_alloc_struct(new_buff);
+@@ -94,7 +94,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
+
+ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
+ {
+- if ((!pmgr) || (!pmgr->dev) || (!pmgr->dbgdev))
++ if (!pmgr || !pmgr->dev || !pmgr->dbgdev)
+ return -EINVAL;
+
+ if (pmgr->pasid != 0) {
+@@ -121,8 +121,7 @@ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
+ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
+ {
+
+- if ((pmgr == NULL) || (pmgr->dev == NULL) || (pmgr->dbgdev == NULL) ||
+- (p == NULL))
++ if (!pmgr || !pmgr->dev || !pmgr->dbgdev || !p)
+ return -EINVAL;
+
+ if (pmgr->pasid != p->pasid) {
+@@ -146,8 +145,8 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
+ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
+ struct dbg_wave_control_info *wac_info)
+ {
+- if ((!pmgr) || (!pmgr->dev) || (!pmgr->dbgdev) || (!wac_info) ||
+- (wac_info->process == NULL))
++ if (!pmgr || !pmgr->dev || !pmgr->dbgdev || !wac_info ||
++ !wac_info->process)
+ return -EINVAL;
+
+ /* Is the requests coming from the already registered
+@@ -167,8 +166,8 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
+ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
+ struct dbg_address_watch_info *adw_info)
+ {
+- if ((!pmgr) || (!pmgr->dev) || (!pmgr->dbgdev) || (!adw_info) ||
+- (adw_info->process == NULL))
++ if (!pmgr || !pmgr->dev || !pmgr->dbgdev || !adw_info ||
++ !adw_info->process)
+ return -EINVAL;
+
+ /* Is the requests coming from the already registered
+@@ -197,7 +196,7 @@ long kfd_dbgmgr_abnormal_termination(struct kfd_dbgmgr *pmgr,
+ long status = 0;
+ struct dbg_wave_control_info wac_info;
+
+- if ((!pmgr) || (!pmgr->dev) || (!pmgr->dbgdev))
++ if (!pmgr || !pmgr->dev || !pmgr->dbgdev)
+ return -EINVAL;
+
+ /* first, we kill all the wavefronts of this process */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 24952c2..232e28f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -45,8 +45,7 @@ void kfd_debugfs_init(void)
+ struct dentry *ent;
+
+ debugfs_root = debugfs_create_dir("kfd", NULL);
+- if (debugfs_root == NULL ||
+- debugfs_root == ERR_PTR(-ENODEV)) {
++ if (!debugfs_root || debugfs_root == ERR_PTR(-ENODEV)) {
+ pr_warn("Failed to create kfd debugfs dir\n");
+ return;
+ }
+@@ -54,19 +53,19 @@ void kfd_debugfs_init(void)
+ ent = debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_mqds_by_process,
+ &kfd_debugfs_fops);
+- if (ent == NULL)
++ if (!ent)
+ pr_warn("Failed to create mqds in kfd debugfs\n");
+
+ ent = debugfs_create_file("hqds", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_hqds_by_device,
+ &kfd_debugfs_fops);
+- if (ent == NULL)
++ if (!ent)
+ pr_warn("Failed to create hqds in kfd debugfs\n");
+
+ ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_rls_by_device,
+ &kfd_debugfs_fops);
+- if (ent == NULL)
++ if (!ent)
+ pr_warn("Failed to create rls in kfd debugfs\n");
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index b5ecfff..62ad717 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -272,7 +272,7 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
+
+ for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
+ if (supported_devices[i].did == did) {
+- BUG_ON(supported_devices[i].device_info == NULL);
++ BUG_ON(!supported_devices[i].device_info);
+ return supported_devices[i].device_info;
+ }
+ }
+@@ -393,7 +393,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
+ flags);
+
+ dev = kfd_device_by_pci_dev(pdev);
+- BUG_ON(dev == NULL);
++ BUG_ON(!dev);
+
+ kfd_signal_iommu_event(dev, pasid, address,
+ flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
+@@ -633,7 +633,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
+
+ void kgd2kfd_suspend(struct kfd_dev *kfd)
+ {
+- BUG_ON(kfd == NULL);
++ BUG_ON(!kfd);
+
+ if (!kfd->init_complete)
+ return;
+@@ -654,7 +654,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
+
+ int kgd2kfd_resume(struct kfd_dev *kfd)
+ {
+- BUG_ON(kfd == NULL);
++ BUG_ON(!kfd);
+
+ if (!kfd->init_complete)
+ return 0;
+@@ -1063,7 +1063,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+ return -ENOMEM;
+
+ *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+- if ((*mem_obj) == NULL)
++ if (!(*mem_obj))
+ return -ENOMEM;
+
+ pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index bae79d0..5840e56 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -351,7 +351,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ BUG_ON(!dqm || !q || !qpd);
+
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+- if (mqd == NULL)
++ if (!mqd)
+ return -ENOMEM;
+
+ retval = allocate_hqd(dqm, q);
+@@ -486,7 +486,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ }
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (mqd == NULL) {
++ if (!mqd) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+@@ -511,7 +511,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ pr_err("unmap queue failed");
+ goto out_unlock;
+ }
+- } else if (is_queue_nocpsch(dqm, q) && prev_active == true) {
++ } else if (is_queue_nocpsch(dqm, q) && prev_active) {
+ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+@@ -532,9 +532,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ * check active state vs. the previous state
+ * and modify counter accordingly
+ */
+- if ((q->properties.is_active) && (!prev_active))
++ if (q->properties.is_active && !prev_active)
+ dqm->queue_count++;
+- else if ((!q->properties.is_active) && (prev_active))
++ else if (!q->properties.is_active && prev_active)
+ dqm->queue_count--;
+
+ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
+@@ -558,7 +558,7 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
+ mqd = dqm->mqds[type];
+ if (!mqd) {
+ mqd = mqd_manager_init(type, dqm->dev);
+- if (mqd == NULL)
++ if (!mqd)
+ pr_err("mqd manager is NULL");
+ dqm->mqds[type] = mqd;
+ }
+@@ -595,7 +595,7 @@ int process_evict_queues(struct device_queue_manager *dqm,
+ continue;
+ }
+ /* if the queue is not active anyway, it is not evicted */
+- if (q->properties.is_active == true) {
++ if (q->properties.is_active) {
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+ }
+@@ -774,7 +774,7 @@ static void init_interrupts(struct device_queue_manager *dqm)
+ {
+ unsigned int i;
+
+- BUG_ON(dqm == NULL);
++ BUG_ON(!dqm);
+
+ for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
+ if (is_pipe_enabled(dqm, 0, i))
+@@ -1137,7 +1137,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+
+- if (mqd == NULL) {
++ if (!mqd) {
+ retval = -ENOMEM;
+ goto out_deallocate_doorbell;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index ac9a463..bad7e6e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -258,7 +258,7 @@ allocate_debug_event_notification_slot(struct file *devkfd,
+
+ if (list_empty(&p->signal_event_pages)) {
+ ret = allocate_signal_page(devkfd, p);
+- if (ret == false)
++ if (!ret)
+ return ret;
+ }
+
+@@ -344,7 +344,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
+
+ for (id = p->next_nonsignal_event_id;
+ id < KFD_LAST_NONSIGNAL_EVENT_ID &&
+- lookup_event_by_id(p, id) != NULL;
++ lookup_event_by_id(p, id);
+ id++)
+ ;
+
+@@ -362,7 +362,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
+
+ for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ id < KFD_LAST_NONSIGNAL_EVENT_ID &&
+- lookup_event_by_id(p, id) != NULL;
++ lookup_event_by_id(p, id);
+ id++)
+ ;
+
+@@ -460,7 +460,7 @@ void kfd_event_init_process(struct kfd_process *p)
+
+ static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
+ {
+- if (ev->signal_page != NULL) {
++ if (ev->signal_page) {
+ if (ev->type == KFD_EVENT_TYPE_SIGNAL) {
+ release_event_notification_slot(ev->signal_page,
+ ev->signal_slot_index);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index a164fd5..8294371 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -367,7 +367,7 @@ int kfd_init_apertures(struct kfd_process *process)
+ }
+
+ pdd = kfd_create_process_device_data(dev, process);
+- if (pdd == NULL) {
++ if (!pdd) {
+ pr_err("Failed to create process device data\n");
+ goto err;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+index 9654d99..e6eb3dc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+@@ -168,7 +168,7 @@ int kfd_ipc_import_dmabuf(struct kfd_dev *dev,
+ int r;
+ struct dma_buf *dmabuf = dma_buf_get(dmabuf_fd);
+
+- if (dmabuf == NULL)
++ if (!dmabuf)
+ return -EINVAL;
+
+ r = kfd_import_dmabuf_create_kfd_bo(dev, p, gpu_id, dmabuf,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 1a21c66..05503216 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -67,12 +67,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ break;
+ }
+
+- if (kq->mqd == NULL)
++ if (!kq->mqd)
+ return false;
+
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+
+- if (prop.doorbell_ptr == NULL) {
++ if (!prop.doorbell_ptr) {
+ pr_err("Failed to initialize doorbell");
+ goto err_get_kernel_doorbell;
+ }
+@@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ kq->pq_gpu_addr = kq->pq->gpu_addr;
+
+ retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
+- if (retval == false)
++ if (!retval)
+ goto err_eop_allocate_vidmem;
+
+ retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 33367c3..894f303 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -162,7 +162,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ m->cp_hqd_iq_rptr = AQL_ENABLE;
+
+ *mqd = m;
+- if (gart_addr != NULL)
++ if (gart_addr)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+@@ -190,7 +190,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
+
+ *mqd = m;
+- if (gart_addr != NULL)
++ if (gart_addr)
+ *gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+ retval = mm->update_mqd(mm, m, q);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 8e31af3..c555066 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -170,7 +170,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ }
+
+ *mqd = m;
+- if (gart_addr != NULL)
++ if (gart_addr)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+@@ -345,7 +345,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ memset(m, 0, sizeof(struct v9_sdma_mqd));
+
+ *mqd = m;
+- if (gart_addr != NULL)
++ if (gart_addr)
+ *gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+ retval = mm->update_mqd(mm, m, q);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index eb7ce43..39ff448 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -173,7 +173,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ }
+
+ *mqd = m;
+- if (gart_addr != NULL)
++ if (gart_addr)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+@@ -366,7 +366,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ memset(m, 0, sizeof(struct vi_sdma_mqd));
+
+ *mqd = m;
+- if (gart_addr != NULL)
++ if (gart_addr)
+ *gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+ retval = mm->update_mqd(mm, m, q);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 5c235bd..8d5eee5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -95,7 +95,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
+
+ BUG_ON(!pm);
+ BUG_ON(pm->allocated);
+- BUG_ON(is_over_subscription == NULL);
++ BUG_ON(!is_over_subscription);
+
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+
+@@ -229,7 +229,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+ pm->dqm = dqm;
+ mutex_init(&pm->lock);
+ pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+- if (pm->priv_queue == NULL) {
++ if (!pm->priv_queue) {
+ mutex_destroy(&pm->lock);
+ return -ENOMEM;
+ }
+@@ -274,7 +274,7 @@ int pm_send_set_resources(struct packet_manager *pm,
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t),
+ (unsigned int **)&buffer);
+- if (buffer == NULL) {
++ if (!buffer) {
+ mutex_unlock(&pm->lock);
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ return -ENOMEM;
+@@ -344,7 +344,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+- if (buffer == NULL) {
++ if (!buffer) {
+ mutex_unlock(&pm->lock);
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ return -ENOMEM;
+@@ -367,7 +367,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+- if (buffer == NULL) {
++ if (!buffer) {
+ mutex_unlock(&pm->lock);
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index e34b686..40d2630 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -212,7 +212,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
+
+ BUG_ON(!kfd_process_wq);
+
+- if (thread->mm == NULL)
++ if (!thread->mm)
+ return ERR_PTR(-EINVAL);
+
+ /* Only the pthreads threading model is supported. */
+@@ -242,7 +242,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
+ {
+ struct kfd_process *process;
+
+- if (thread->mm == NULL)
++ if (!thread->mm)
+ return ERR_PTR(-EINVAL);
+
+ /* Only the pthreads threading model is supported. */
+@@ -455,9 +455,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ dev = pdd->dev;
+ mutex_lock(get_dbgmgr_mutex());
+
+- if ((dev != NULL) &&
+- (dev->dbgmgr) &&
+- (dev->dbgmgr->pasid == p->pasid)) {
++ if (dev && dev->dbgmgr && (dev->dbgmgr->pasid == p->pasid)) {
+
+ status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
+ if (status == 0) {
+@@ -649,7 +647,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
+ qpd->doorbell_bitmap =
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ BITS_PER_BYTE), GFP_KERNEL);
+- if (qpd->doorbell_bitmap == NULL)
++ if (!qpd->doorbell_bitmap)
+ return -ENOMEM;
+
+ /* Mask out any reserved doorbells */
+@@ -821,7 +819,7 @@ void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
+ struct kfd_process *p;
+ struct kfd_process_device *pdd;
+
+- BUG_ON(dev == NULL);
++ BUG_ON(!dev);
+
+ /*
+ * Look for the process that matches the pasid. If there is no such
+@@ -836,7 +834,7 @@ void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
+
+ mutex_lock(get_dbgmgr_mutex());
+
+- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) {
++ if (dev->dbgmgr && (dev->dbgmgr->pasid == p->pasid)) {
+
+ if (kfd_dbgmgr_unregister(dev->dbgmgr, p) == 0) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+@@ -895,8 +893,8 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ struct kfd_bo *buf_obj;
+ struct kfd_process *p;
+
+- BUG_ON(pdd == NULL);
+- BUG_ON(mem == NULL);
++ BUG_ON(!pdd);
++ BUG_ON(!mem);
+
+ p = pdd->process;
+
+@@ -931,7 +929,7 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ struct kfd_bo *kfd_process_device_find_bo(struct kfd_process_device *pdd,
+ int handle)
+ {
+- BUG_ON(pdd == NULL);
++ BUG_ON(!pdd);
+
+ if (handle < 0)
+ return NULL;
+@@ -967,7 +965,7 @@ void *kfd_process_find_bo_from_interval(struct kfd_process *p,
+ return NULL;
+ }
+
+- if (interval_tree_iter_next(it_node, start_addr, last_addr) != NULL) {
++ if (interval_tree_iter_next(it_node, start_addr, last_addr)) {
+ pr_err("0x%llx-0x%llx spans more than a single BO\n",
+ start_addr, last_addr);
+ return NULL;
+@@ -987,7 +985,7 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
+ struct kfd_bo *buf_obj;
+ struct kfd_process *p;
+
+- BUG_ON(pdd == NULL);
++ BUG_ON(!pdd);
+
+ p = pdd->process;
+
+@@ -1041,7 +1039,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ p = find_process_by_mm(mm);
+- if (p != NULL)
++ if (p)
+ kref_get(&p->ref);
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+@@ -1058,7 +1056,7 @@ int kfd_reserved_mem_mmap(struct kfd_process *process,
+ struct kfd_process_device *temp, *pdd = NULL;
+ struct qcm_process_device *qpd = NULL;
+
+- if (dev == NULL)
++ if (!dev)
+ return -EINVAL;
+ if (((vma->vm_end - vma->vm_start) != dev->cwsr_size) ||
+ (vma->vm_start & (PAGE_SIZE - 1)) ||
+@@ -1076,7 +1074,7 @@ int kfd_reserved_mem_mmap(struct kfd_process *process,
+ break;
+ }
+ }
+- if (qpd == NULL)
++ if (!qpd)
+ return -EINVAL;
+
+ qpd->cwsr_pages = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index d2c9389..afc990e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -107,7 +107,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
+ pqm->queue_slot_bitmap =
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ BITS_PER_BYTE), GFP_KERNEL);
+- if (pqm->queue_slot_bitmap == NULL)
++ if (!pqm->queue_slot_bitmap)
+ return -ENOMEM;
+ pqm->process = p;
+
+@@ -257,7 +257,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ break;
+ case KFD_QUEUE_TYPE_DIQ:
+ kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
+- if (kq == NULL) {
++ if (!kq) {
+ retval = -ENOMEM;
+ goto err_create_queue;
+ }
+@@ -322,7 +322,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ retval = 0;
+
+ pqn = get_queue_by_qid(pqm, qid);
+- if (pqn == NULL) {
++ if (!pqn) {
+ pr_err("Queue id does not match any known queue\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index da91ced..83e7d36 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -202,7 +202,7 @@ struct kfd_topology_device *kfd_create_topology_device(
+ struct kfd_topology_device *dev;
+
+ dev = kfd_alloc_struct(dev);
+- if (dev == NULL) {
++ if (!dev) {
+ pr_err("No memory to allocate a topology device");
+ return NULL;
+ }
+@@ -802,7 +802,7 @@ static int kfd_topology_update_sysfs(void)
+ int ret;
+
+ pr_info("Creating topology SYSFS entries\n");
+- if (sys_props.kobj_topology == NULL) {
++ if (!sys_props.kobj_topology) {
+ sys_props.kobj_topology =
+ kfd_alloc_struct(sys_props.kobj_topology);
+ if (!sys_props.kobj_topology)
+@@ -949,7 +949,7 @@ static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
+
+ if (amd_iommu_pc_supported()) {
+ props = kfd_alloc_struct(props);
+- if (props == NULL)
++ if (!props)
+ return -ENOMEM;
+ strcpy(props->block_name, "iommu");
+ props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
+@@ -969,7 +969,7 @@ static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
+ static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
+ {
+ /* Check if CPU only node. */
+- if (kdev->gpu == NULL) {
++ if (!kdev->gpu) {
+ /* Add system memory information */
+ dmi_walk(find_system_memory, kdev);
+ }
+@@ -1166,7 +1166,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+
+ down_write(&topology_lock);
+ list_for_each_entry(dev, &topology_device_list, list)
+- if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
++ if (!dev->gpu && (dev->node_props.simd_count > 0)) {
+ dev->gpu = gpu;
+ out_dev = dev;
+ break;
+@@ -1191,7 +1191,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
+ struct kfd_mem_properties *mem;
+ struct kfd_local_mem_info local_mem_info;
+
+- if (dev == NULL)
++ if (!dev)
+ return;
+
+ /* Currently, amdgpu driver (amdgpu_mc) deals only with GPUs with
+@@ -1211,7 +1211,7 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
+ {
+ struct kfd_iolink_properties *link;
+
+- if ((dev == NULL) || (dev->gpu == NULL))
++ if (!dev || !dev->gpu)
+ return;
+
+ /* GPU only creates direck links so apply flags setting to all */
+@@ -1419,7 +1419,7 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
+ {
+ int first_cpu_of_numa_node;
+
+- if (cpumask == NULL || cpumask == cpu_none_mask)
++ if (!cpumask || (cpumask == cpu_none_mask))
+ return -1;
+ first_cpu_of_numa_node = cpumask_first(cpumask);
+ if (first_cpu_of_numa_node >= nr_cpu_ids)
+--
+2.7.4
+